| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from source ``s`` to sink ``t``."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the edges of the minimum cut found via Ford-Fulkerson."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Saturated edges (zero residual capacity, positive original capacity)
    # form the minimum cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
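
# --- added illustration (not part of the original sample) --------------------
# ``mincut`` consumes the capacities in ``graph`` in place while pushing flow,
# so callers that still need the original matrix should hand in a deep copy:
def mincut_preserving(graph, source, sink):
    """Run ``mincut`` on a deep copy so the caller's matrix is left intact."""
    import copy

    return mincut(copy.deepcopy(graph), source, sink)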


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))

def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasing sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers, so return the last index of the array + 1 (its length).
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives via binary search; the search bound shrinks row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative number found."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
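
# --- added illustration (not part of the original sample) --------------------
# A quick cross-check: all three counters must agree on every grid in
# ``test_grids`` (``validate_grid`` confirms each grid is sorted as required).
def sanity_check() -> None:
    for test_grid in test_grids:
        validate_grid(test_grid)
        expected = count_negatives_brute_force(test_grid)
        assert count_negatives_binary_search(test_grid) == expected
        assert count_negatives_brute_force_with_break(test_grid) == expected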


def benchmark() -> None:
    """Benchmark the three counting implementations against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()

import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until enough images are returned (capped at 10**4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Verify the payload is a decodable image before saving it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
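
# Example invocation (added; the script name, prompt, and paths are
# illustrative assumptions only):
#
#   python retrieve.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./class_images --num_class_images 200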
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection`` by interpolating its
    likely position from the boundary values."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of ``interpolation_search``; ``left`` and ``right`` track
    the current search interval."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if ``collection`` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
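
# --- added illustration (not part of the original sample) --------------------
# Expected behaviour on a small ascending list; both variants should agree:
def _demo() -> None:
    data = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search(data, 66) == 5
    assert interpolation_search_by_recursion(data, 66, 0, len(data) - 1) == 5
    assert interpolation_search(data, 12) is None  # absent items yield None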


if __name__ == "__main__":
    import sys

    debug = 0
    # Define the collection unconditionally so the search below always has input.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")

import os


def solution():
    """Return the greatest product of four adjacent numbers (right, down, or
    diagonally) in the 20x20 grid read from ``grid.txt``."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum


if __name__ == "__main__":
    print(solution())

import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Normalize every array in the list to zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        """Extract log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
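
# --- added illustration (not part of the original module) --------------------
# A minimal usage sketch with the class defaults above: waveforms go in via
# ``audio``, log-mel spectrogram targets via ``audio_target``.
if __name__ == "__main__":
    extractor = SpeechT5FeatureExtractor()
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
    inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
    targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
    print(inputs["input_values"].shape, targets["input_values"].shape)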
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
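# Illustrative usage sketch (not part of the original module): the auto classes
# above dispatch on the config type stored in a checkpoint. The checkpoint name
# is only an example, and loading it requires network access:
#
#     from transformers import FlaxAutoModelForSeq2SeqLM
#
#     model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-base")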
"""Tokenization classes for XLNet model."""


import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    """XLNet tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
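# Illustrative usage sketch (not part of the original module); loading the
# pretrained vocabulary requires network access, so it is left commented out:
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     ids = tokenizer("Hello world")["input_ids"]
#     print(tokenizer.decode(ids))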
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Bottom-up dynamic programming solution for matching input_string against a
    pattern that may contain '.' (any single character) and '*' (zero or more
    of the preceding character).

    >>> match_pattern("aab", "c*a*b")
    True
    >>> match_pattern("aaa", "aa")
    False
    >>> match_pattern("aaa", "a.a")
    True
    >>> match_pattern("", "")
    True
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector: sum(x * x for x in vector)."""
    return np.dot(vector, vector)


class SVC:
    """
    Support Vector Classifier with linear and RBF kernels, trained by solving
    Wolfe's dual problem with scipy.optimize.minimize.
    """

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel is used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        """Fit the classifier on observations whose classes are -1 or 1."""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the dual objective, so that minimize() maximizes it."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation (1 or -1)."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
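# Illustrative usage sketch (not part of the original file): fit the SVC on a
# tiny linearly separable dataset and predict the class of new points.
if __name__ == "__main__":
    xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([0.0, 1.5])))  # expected: 1
    print(svc.predict(np.asarray([1.0, 1.5])))  # expected: -1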
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # The original rotor choice was collapsed by obfuscation; any three unique
    # rotors work here.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
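# Round-trip sanity sketch (not in the original file): because the reflector
# makes each per-position mapping an involution, running the machine twice
# with the same settings restores the original message.
def _round_trip_check() -> None:
    msg = "TESTING ENIGMA ROUND TRIP"
    settings = ((3, 5, 7), (rotor1, rotor3, rotor5), "AB")
    once = enigma(msg, *settings)
    twice = enigma(once, *settings)
    assert twice == msg


_round_trip_check()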
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""Convert OPT checkpoint."""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the checkpoint's weights into our OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
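# Example invocation (illustrative; both paths are placeholders):
#
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path /path/to/model.pt \
#       --pytorch_dump_folder_path ./opt-converted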
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
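# The demo above computes the shortest paths but never displays them; a small
# illustrative addition (not in the original file) prints the two queries:
if __name__ == "__main__":
    print("min distance 1 -> 4:", graph.show_min(1, 4))  # 1 -> 3 -> 4 = 5 + 6 = 11
    print("min distance 0 -> 3:", graph.show_min(0, 3))  # 0 -> 2 -> 3 = 9 + 7 = 16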
import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below the given ratio.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
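# Sanity sketch (not in the original file): the corners of the spiral ring
# with side length s = j + 2 are s*s, s*s - (s-1), s*s - 2*(s-1) and
# s*s - 3*(s-1); the inner loop in solution() enumerates exactly the three
# non-square corners of each ring.
for _j in (3, 5, 7):
    _s = _j + 2
    _corners = {_s * _s - k * (_s - 1) for k in (1, 2, 3)}
    assert _corners == set(range(_j * _j + _j + 1, _s * _s, _j + 1))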
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP-style image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. 1/255."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
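# Illustrative usage sketch (not part of the original module): run a dummy
# PIL image through the full pipeline and inspect the output shape.
if __name__ == "__main__":
    from PIL import Image

    processor = CLIPImageProcessor()
    dummy = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)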
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """Enum that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
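# Illustrative sketch (not part of the original module): hashing a local file.
# The path is a placeholder.
#
#     info = get_size_checksum_dict("/path/to/file.bin")
#     # -> {"num_bytes": <size in bytes>, "checksum": "<sha256 hex digest>"}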
"""
Project Euler Problem 131: https://projecteuler.net/problem=131

There are some prime values, p, for which there exists a positive integer, n,
such that the expression n^3 + n^2*p is a perfect cube. This happens exactly
when p is the difference of two consecutive cubes, so the candidates below are
generated as centered hexagonal numbers.
"""
from math import isqrt


def is_prime(number: int) -> bool:
    """Determines whether number is prime, by trial division up to its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Returns the number of primes below max_prime with the property above."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
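# Sanity sketch (not in the original file): the candidates generated above are
# the centered hexagonal numbers 3n^2 + 3n + 1, i.e. the differences of
# consecutive cubes (n+1)^3 - n^3.
for _n in range(1, 5):
    assert (_n + 1) ** 3 - _n**3 == 3 * _n * _n + 3 * _n + 1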
"""
Python program for Bitonic Sort.

Note that this program works only when the size of the input is a power of 2.
"""
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare the values at index1 and index2 of the array and swap them as per
    the given direction (1 for ascending, 0 for descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into sorted order in the given direction."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """First produce a bitonic sequence by recursively sorting the two halves in
    opposite directions, then call bitonic_merge to sort the whole sequence."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
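# Self-test sketch (not in the original file): bitonic sort requires a
# power-of-two input length, since every stage splits the range exactly in half.
def _self_test() -> None:
    import random

    data = [random.randint(-100, 100) for _ in range(16)]  # 16 is a power of two
    bitonic_sort(data, 0, len(data), 1)
    assert data == sorted(data)


_self_test()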
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
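# Illustrative sketch (not part of the original file): collate_fn pads
# variable-length sentences in a batch and builds the matching attention mask.
# The token ids and label width below are made-up values.
if __name__ == "__main__":
    fake_row = {
        "image": torch.zeros(3, 224, 224),
        "label": torch.zeros(len(get_mmimdb_labels())),
        "image_start_token": torch.tensor(101),
        "image_end_token": torch.tensor(102),
    }
    fake_batch = [
        {**fake_row, "sentence": torch.tensor([1, 2, 3])},
        {**fake_row, "sentence": torch.tensor([4, 5])},
    ]
    text, mask, img, start, end, tgt = collate_fn(fake_batch)
    print(text.shape, mask.shape)  # torch.Size([2, 3]) torch.Size([2, 3])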
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True)

        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 73 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
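# Quick sanity demo of rename_key (illustrative; not part of the original script):
# the conversion loop below pops each old key and re-inserts its tensor under the new name.
_demo = {"ln_vision.weight": 0}
rename_key(_demo, "ln_vision.weight", "vision_model.post_layernorm.weight")
assert _demo == {"vision_model.post_layernorm.weight": 0}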
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""")

        # next, set bias in the state dict: the fused qkv projection expects
        # (q_bias, zeros for k, v_bias) concatenated into a single vector
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
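# Toy illustration (added here; not in the original script) of the qkv-bias layout:
# BLIP-2's vision attention has no learned k bias, so zeros are spliced between q and v.
_q, _v = torch.ones(2), torch.full((2,), 3.0)
_qkv = torch.cat((_q, torch.zeros_like(_v), _v))
assert _qkv.tolist() == [1.0, 1.0, 0.0, 0.0, 3.0, 3.0]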
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original LAVIS weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
    lavis_model, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9,
        repetition_penalty=1.0, length_penalty=1.0, temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"""nielsr/{model_name}""")
        hf_model.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 354 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its words, e.g. "TFBertModel" -> ["TF", "Bert", "Model"]."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
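# Illustrative check (added here; not in the original script) of how the prefix
# trimming in get_frameworks_table() below walks back through a class name word by word.
assert camel_case_split("FlaxDistilBertModel") == ["Flax", "Distil", "Bert", "Model"]
assert "".join(camel_case_split("DistilBertModel")[:-1]) == "DistilBert"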
def get_frameworks_table():
    """Generate a dataframe flagging, for each model type, the frameworks that implement it."""
    # Dictionary model names to config.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = sorted(all_models)

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure every model type has a preprocessor class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model class to (pipeline_tag, auto_class) across PT/TF/Flax."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo on the Hub."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that every supported pipeline task appears in PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"""`utils/update_metadata.py`: {msg}. Please add them!"""
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 208 | 0 |
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701

a = 1_000_000_000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python's ** operator (computes the full power before reducing):
print((a / b) % p == (a * b ** (p - 2)) % p)
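# Why this works (explanatory note added here; not in the original): by Fermat's
# little theorem, for prime p and b not divisible by p, b**(p-1) % p == 1, so
# b**(p-2) % p is the modular inverse of b. Dividing by b modulo p is therefore
# the same as multiplying by binary_exponentiation(b, p - 2, p).
assert (b * binary_exponentiation(b, p - 2, p)) % p == 1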
| 277 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 277 | 1 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k fillings for the single <mask> token in `masked_input`."""
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
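# Expected output shape (illustrative note added here; exact tokens depend on the
# checkpoint): a list of `topk` tuples (filled_sentence, probability, predicted_token),
# e.g. [("Le camembert est délicieux :)", 0.49, "délicieux"), ...]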
| 370 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 148 | 0 |
def odd_even_sort(input_list: list) -> list:
    """Sort the list in place with odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
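# Illustrative check (added here; not in the original): each pass compares disjoint
# even/odd index pairs, which makes the algorithm easy to parallelize; worst-case
# running time is still O(n^2) like bubble sort.
assert odd_even_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]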
if __name__ == "__main__":
print('''Enter list to be sorted''')
a__ : int = [int(x) for x in input().split()]
# inputing elements of the list in one line
a__ : List[str] = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 313 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """Output of FlaxUNet2DConditionModel: the denoised sample."""

    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
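# Minimal usage sketch (illustrative; not part of the module). Shapes follow the
# defaults above: a 4-channel 32x32 latent and 1280-dim text conditioning. Note
# that initializing the default (Stable-Diffusion-sized) UNet is slow on CPU.
if __name__ == "__main__":
    unet = FlaxUNet2DConditionModel()
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 32, 32))
    encoder_hidden_states = jnp.zeros((1, 77, 1280))
    out = unet.apply({"params": params}, sample, jnp.array([10]), encoder_hidden_states)
    print(out.sample.shape)  # (1, 4, 32, 32)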
| 313 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" MobileBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding special tokens: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return segment ids: 0 for the first sequence (and its special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
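# Illustrative sketch (added here; not part of the original module) of the two
# helpers above: a pair of sequences becomes [CLS] A [SEP] B [SEP] with 0/1 segments.
# The checkpoint name is the usual one but requires a download, hence commented out.
#
# tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
# ids = tok.build_inputs_with_special_tokens([7592], [2088])       # [CLS] 7592 [SEP] 2088 [SEP]
# segs = tok.create_token_type_ids_from_sequences([7592], [2088])  # [0, 0, 0, 1, 1]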
| 74 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
                {
                    "name": "train",
                    "num_bytes": 2351563,
                    "num_examples": 10000,
                },
                {
                    "name": "validation",
                    "num_bytes": 238418,
                    "num_examples": 1000,
                },
            ], download_size=3940680, dataset_size=2589981)
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 74 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
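# Quick demo (illustrative; not in the original script) of the key rewriting: the
# PATTERNS substitutions are applied in order, so slashes become dots before the
# LayerNorm rename, and the encoder prefix fix-up runs last.
assert rename_state_dict_key("encoder/LayerNorm/gamma") == "encoder.layer_norm.weight"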
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
__a = parser.parse_args()
if args.save_dir is None:
__a = Path(args.tf_ckpt_path).parent.name
__a = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir) | 145 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCAmelCase , total_number - level + 2 ):
current_list.append(_lowerCAmelCase )
create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase )
current_list.pop()
def lowercase (_lowerCAmelCase ):
for i in total_list:
print(*_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
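# Worked example: generate_all_combinations(4, 2) yields the C(4, 2) == 6 pairs in
# lexicographic order.
assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]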
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # float16 deviates more from float32, so allow a larger diff than the default.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
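
# The mps branch in get_dummy_inputs above exists because torch.Generator objects
# historically could not be created on Apple's mps backend, so the test seeds the
# global RNG instead. A standalone sketch of the same pattern (hypothetical helper):
def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global RNG; mps cannot host a Generator
    return torch.Generator(device=device).manual_seed(seed)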
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
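
# A minimal sketch of the on-disk layout load_vocab_and_emoji expects (contents are
# illustrative, not the real GPT-NeoX-Japanese vocabulary). vocab.txt holds one
# entry per line, and a line may contain several comma-separated surface forms that
# share a single id; emoji.json maps emoji to special tokens and back:
#
#   vocab.txt           emoji.json
#   -------------       -----------------------------------------
#   <|endoftext|>       {"emoji": {"😀": "<|emoji1|>"},
#   ,                    "emoji_inv": {"<|emoji1|>": "😀"}}
#   あ,ア
#
# With this input, vocab maps both "あ" and "ア" to id 2, raw_vocab is keyed by the
# joined line "あ,ア", and ids_to_tokens[2] == ["あ", "ア"].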

class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token,
            do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file

class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace("　", "<SP>")
        text = text.replace(" ", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
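
# A sketch of the byte-fallback path above (assumes "日" is out-of-vocabulary):
# unknown characters are emitted one UTF-8 byte at a time as <|byteN|> tokens, and
# convert_id_to_token reassembles consecutive byte tokens with bytearray().decode:
#
#   >>> "日".encode("utf-8")
#   b'\xe6\x97\xa5'                       # bytes 230, 151, 165
#   # tokenize -> ["<|byte230|>", "<|byte151|>", "<|byte165|>"]
#   >>> bytearray([230, 151, 165]).decode("utf-8", errors="replace")
#   '日'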
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
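
# A minimal usage sketch for preprocess (PIL path): the image is resized to (w, h)
# with Lanczos resampling, rescaled to [0, 1], transposed to NCHW and mapped to
# [-1, 1]:
#
#   from PIL import Image
#   img = Image.open("content.png").convert("RGB")   # hypothetical file
#   tensor = preprocess(img, 512, 512)
#   assert tensor.shape == (1, 3, 512, 512)
#   assert -1.0 <= tensor.min() and tensor.max() <= 1.0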

def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # track the input type up front so numpy inputs don't hit an undefined name later
    inputs_are_torch = not isinstance(v0, np.ndarray)
    if inputs_are_torch:
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
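
# A small numeric check of slerp: for orthogonal unit vectors the t=0.5 point has
# both coefficients sin(pi/4) / sin(pi/2) == sqrt(2) / 2:
#
#   >>> slerp(0.5, torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0]))
#   tensor([0.7071, 0.7071])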

def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value

class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model,
            coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self, latents, timestep, index, text_embeddings, noise_pred_original,
        original_image_embeddings_clip, clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(
        self, style_image, content_image, style_prompt: Optional[str] = None, content_prompt: Optional[str] = None,
        height: Optional[int] = 512, width: Optional[int] = 512, noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8, slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
'''simple docstring'''
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(UpperCamelCase_ )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(UpperCamelCase_ , torch.Generator ) and batch_size > 1:
UpperCamelCase__ : List[Any] = [generator] + [None] * (batch_size - 1)
UpperCamelCase__ : Union[str, Any] = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCamelCase__ : Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase__ : Union[str, Any] = ''', '''.join(UpperCamelCase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCamelCase_ ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
UpperCamelCase__ : List[str] = self.get_image_description(UpperCamelCase_ )
if style_prompt is None:
if len(UpperCamelCase_ ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
UpperCamelCase__ : List[Any] = self.get_image_description(UpperCamelCase_ )
# get prompt text embeddings for content and style
UpperCamelCase__ : Union[str, Any] = self.tokenizer(
UpperCamelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors="pt" , )
UpperCamelCase__ : Any = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase__ : Any = self.tokenizer(
UpperCamelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors="pt" , )
UpperCamelCase__ : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase__ : str = slerp(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase__ : List[str] = text_embeddings.repeat_interleave(UpperCamelCase_ , dim=0 )
# set timesteps
UpperCamelCase__ : Dict = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase__ : Any = {}
if accepts_offset:
UpperCamelCase__ : Tuple = 1
self.scheduler.set_timesteps(UpperCamelCase_ , **UpperCamelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCamelCase__ : int = self.get_timesteps(UpperCamelCase_ , UpperCamelCase_ , self.device )
UpperCamelCase__ : Optional[Any] = timesteps[:1].repeat(UpperCamelCase_ )
# Preprocess image
UpperCamelCase__ : Any = preprocess(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ : Dict = self.prepare_latents(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text_embeddings.dtype , self.device , UpperCamelCase_ )
UpperCamelCase__ : Optional[int] = preprocess(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ : Optional[Any] = self.prepare_latents(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text_embeddings.dtype , self.device , UpperCamelCase_ )
UpperCamelCase__ : int = slerp(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if clip_guidance_scale > 0:
UpperCamelCase__ : Optional[Any] = self.get_clip_image_embeddings(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ : List[str] = self.get_clip_image_embeddings(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ : Dict = slerp(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase__ : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ : str = content_text_input.input_ids.shape[-1]
UpperCamelCase__ : List[str] = self.tokenizer([""] , padding="max_length" , max_length=UpperCamelCase_ , return_tensors="pt" )
UpperCamelCase__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase__ : Optional[Any] = uncond_embeddings.repeat_interleave(UpperCamelCase_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase__ : Optional[int] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase__ : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase__ : Any = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device="cpu" , dtype=UpperCamelCase_ ).to(
self.device )
else:
UpperCamelCase__ : int = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
UpperCamelCase__ : Union[str, Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase__ : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ : Any = {}
if accepts_eta:
UpperCamelCase__ : Union[str, Any] = eta
# check if the scheduler accepts generator
UpperCamelCase__ : int = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase__ : List[Any] = generator
with self.progress_bar(total=UpperCamelCase_ ):
for i, t in enumerate(UpperCamelCase_ ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ : str = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
UpperCamelCase__ : str = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ : List[str] = noise_pred.chunk(2 )
UpperCamelCase__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase__ : Dict = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase__ : Optional[int] = self.cond_fn(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ : Dict = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase__ : Any = 1 / 0.1_8_2_1_5 * latents
UpperCamelCase__ : Union[str, Any] = self.vae.decode(UpperCamelCase_ ).sample
UpperCamelCase__ : Any = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ : Tuple = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
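
# A hedged usage sketch (argument names are assumptions -- check the __call__
# signature above; the component checkpoints are illustrative):
#
#   pipe = CLIPGuidedImagesMixingStableDiffusion(
#       vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer,
#       unet=unet, scheduler=scheduler, feature_extractor=feature_extractor,
#       coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
#   ).to("cuda")
#   out = pipe(style_image, content_image, num_inference_steps=50, output_type="pil")
#   out.images[0].save("mixed.png")
#
# Passing content_prompt/style_prompt explicitly skips the CoCa captioner, which is
# otherwise needed to describe the two input images.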
"""Count the number of ways to assign N distinct tasks to M persons using bitmask DP."""
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M) * N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons have a task, so this is one way
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
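
# Worked example for the driver above: persons 0, 1 and 2 can take tasks {1, 3, 4},
# {1, 2, 5} and {3, 4} respectively, and every person needs a distinct task.
# Enumerating distinct triples (t0, t1, t2) by hand gives 10 valid assignments, so
# the program prints 10. Complexity: the memo table has 2^M * (N + 1) states
# (M = persons, N = tasks), each filled in O(M).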
"""simple docstring"""
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Choose a random pivot for the list (median-of-medians would also work here)."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest number in lst using quickselect."""
    # pick a pivot and separate into lists based on the pivot
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
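# Usage sketch: k is 1-based. Note the strict < / > partitions drop duplicates of
# the pivot, so inputs are assumed to contain distinct values.
assert kth_number([2, 1, 3, 4, 5], 1) == 1
assert kth_number([2, 1, 3, 4, 5], 3) == 3
assert kth_number([2, 1, 3, 4, 5], 5) == 5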
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]


class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None,
        src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None, **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file,
            src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
def __getstate__(self ) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__(self , __a ) -> Tuple:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case_ (self ) -> Any:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case_ (self ) -> str:
return self._src_lang
@src_lang.setter
def snake_case_ (self , __a ) -> None:
UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ (self , __a , __a = None , __a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
UpperCamelCase = [1] * len(self.prefix_tokens )
UpperCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def snake_case_ (self , __a , __a = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ (self , __a , __a = None ) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ (self , __a , __a , __a , __a , **__a ) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCamelCase = src_lang
UpperCamelCase = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
UpperCamelCase = self.convert_tokens_to_ids(__a )
UpperCamelCase = tgt_lang_id
return inputs
def snake_case_ (self ) -> List[str]:
UpperCamelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ (self , __a ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def snake_case_ (self , __a ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ (self , __a ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ (self , __a ) -> Optional[int]:
UpperCamelCase = "".join(__a ).replace(__a , " " ).strip()
return out_string
def snake_case_ (self , __a , __a = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
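# A hedged usage sketch (the checkpoint id is real; exact token ids are omitted on
# purpose). Per set_src_lang_special_tokens above, encoded inputs carry no prefix
# and end with the suffix [eos, src_lang_code]:
#
#   from transformers import MBartTokenizer
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # batch["input_ids"][0, -2:] == [tok.eos_token_id, tok.lang_code_to_id["en_XX"]]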
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()

        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
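# Example invocation (a sketch; assumes the timm LeViT weights are downloadable and,
# when pushing, that you are authenticated against the Hugging Face Hub):
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub
#
# Omitting --model_name converts every entry in names_to_config in one run.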
"""Compute tf-idf: term frequency, document frequency and inverse document frequency."""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    # strip all punctuation and newlines, then tokenize on spaces
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
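# Worked example tying the four functions together (numbers verified by hand):
example_tf = term_frequency("the", "The cat sat on the mat")  # 2 case-insensitive matches
example_df, example_n = document_frequency("the", "the cat sat\na dog barked\nthe bird flew")  # (2, 3)
example_idf = inverse_document_frequency(example_df, example_n)  # round(log10(3 / 2), 3) == 0.176
print(tf_idf(example_tf, example_idf))  # 0.352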
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(thresholding=lowerCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=lowerCAmelCase_)
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0]):
self.check_over_forward(time_step=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_)
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase_ , eta=lowerCAmelCase_)
    def test_variance(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_2_0, 4_0_0) - 0.1_47_71)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_8_0, 9_6_0) - 0.3_24_60)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7, 4_8_6) - 0.0_09_79)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9, 9_9_8) - 0.02)) < 1E-5

    def test_batch_step_no_noise(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 1_0, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1

        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 11_47.79_04) < 1E-2
        assert abs(result_mean.item() - 0.49_82) < 1E-3
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_72.00_67) < 1E-2
        assert abs(result_mean.item() - 0.22_39_67) < 1E-3

    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='v_prediction')

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.53_02) < 1E-2
        assert abs(result_mean.item() - 0.06_84) < 1E-3

    def test_full_loop_with_set_alpha_to_one(self):
        '''simple docstring'''
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_49.82_95) < 1E-2
        assert abs(result_mean.item() - 0.19_51) < 1E-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        '''simple docstring'''
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_49.07_84) < 1E-2
        assert abs(result_mean.item() - 0.19_41) < 1E-3
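    # A sketch of the relation the expected values in test_variance follow:
    # for DDIM the per-step variance is
    #   sigma_t**2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev)
    # evaluated on the linear beta schedule from get_scheduler_config; the
    # hard-coded constants (0.1_47_71, 0.3_24_60, ...) are assumed to come from
    # exactly that computation.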
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
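# Worked example: for nums = [1, 2, 3] the (max_including, max_excluding) pair
# evolves (1, 0) -> (2, 1) -> (4, 2), so the function returns 4, i.e. the
# non-adjacent picks 1 and 3.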
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    '''simple docstring'''
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    '''simple docstring'''
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / f"""{split}.source""").open("w").write("\n".join(packed_src))
        Path(save_path / f"""{split}.target""").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path, save_path / f"""{split}.source""")
        shutil.copyfile(tgt_path, save_path / f"""{split}.target""")


def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
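# Minimal usage sketch (hypothetical tokenizer name and toy data): the packer
# greedily concatenates consecutive example pairs until the tokenized candidate
# would exceed max_tokens, then starts a new packed example.
# tok = AutoTokenizer.from_pretrained("t5-small")
# packed_src, packed_tgt = pack_examples(tok, ["a b", "c d", "e"], ["x", "y", "z"], max_tokens=8)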
if __name__ == "__main__":
packer_cli()
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        raise NotImplementedError()

    @abstractmethod
    def run(self) -> None:
        raise NotImplementedError()
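# Subclass sketch (hypothetical command) showing the two hooks a concrete CLI
# command is expected to implement:
# class EnvCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser) -> None:
#         ...  # attach this command's sub-parser/arguments to the CLI parser
#     def run(self) -> None:
#         ...  # execute the command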
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    '''simple docstring'''
    base_num = str(n)
    return len(base_num) == 9 and set(base_num) == set('''123456789''')


def solution() -> int | None:
    '''simple docstring'''
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
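# Why those multipliers: for a 4-digit base b, the concatenated product of b
# with (1, 2) is b followed by 2*b, i.e. b * 10**5 + 2 * b = 10_0002 * b; a
# 3-digit base with (1, 2, 3) likewise gives
# b * 10**6 + 2*b * 10**3 + 3*b = 100_2003 * b.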
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1E-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
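# Shape math used in create_and_check_model above: each of the
# len(depths) - 1 patch-merging stages halves the feature map's height and
# width (dividing the token count by 4) and doubles the channel dimension,
# hence the // (4 ** (len(depths) - 1)) and * 2 ** (len(depths) - 1) factors.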
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason='''Swinv2 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, '''num_hidden_states_types'''):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
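    # Window attention note: Swinv2 attends within fixed local windows, so each
    # attention map has shape (num_heads, window_size**2, window_size**2)
    # independent of image resolution -- which is what the assertions above check.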
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, '''expected_num_hidden_layers''', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to(
            torch_device)
        image_processor = self.default_image_processor

        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3_947, -0.4_306, 0.0_026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    '''simple docstring'''
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    '''simple docstring'''
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    '''simple docstring'''
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    '''simple docstring'''
    setup_code = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    test_code1 = '\ntriplet_sum1(*dataset)\n'
    test_code2 = '\ntriplet_sum2(*dataset)\n'
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
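# Complexity note: triplet_sum1 enumerates all ordered 3-permutations, so it is
# O(n**3) in the array length, while triplet_sum2 sorts once and runs a
# two-pointer scan per anchor element for O(n**2) overall -- the timing
# comparison below should reflect that gap.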
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f'''The time for naive implementation is {times[0]}.''')
print(f'''The time for optimized implementation is {times[1]}.''')
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
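# Heron's formula, as used in area_triangle_three_sides: with semi-perimeter
# s = (a + b + c) / 2 the area is sqrt(s * (s - a) * (s - b) * (s - c)).
# Example: sides 5, 12, 13 give s = 15 and area sqrt(15 * 10 * 3 * 2) = 30.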
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False

    return True
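# Worked example: n = 221 gives n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2;
# testing base 2 yields pow(2, 55, 221) = 128 and pow(2, 110, 221) = 30,
# neither 1 nor n - 1, so 221 (= 13 * 17) is correctly reported composite.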
def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
    test_miller_rabin()
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class
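# Minimal usage sketch (the checkpoint name is an assumption; any repo bundling
# a ChineseCLIP image processor with a BERT tokenizer would do):
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["一只猫"], images=image, return_tensors="pt")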
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('''T''')


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> "GraphAdjacencyList[T]":
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
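# Usage sketch for the adjacency-list builder above:
# graph = GraphAdjacencyList[int](directed=False)
# graph.add_edge(0, 1).add_edge(1, 2)  # add_edge returns self, so calls chain
# print(graph)  # {0: [1], 1: [0, 2], 2: [1]}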
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the count of nodes you want; -1 (the default) picks a random count
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
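# Design note: dfs, topological_sort, cycle_nodes and has_cycle above are
# written iteratively with an explicit stack (plus the `ss` look-ahead
# variable) rather than recursion, so large randomly filled graphs do not run
# into Python's recursion limit.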
class Graph:
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    '''simple docstring'''
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        '''simple docstring'''
        pass

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pass

    def get_pretrained_model_and_inputs(self):
        '''simple docstring'''
        pass

    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def assert_almost_equals(self, a, b, tol):
        '''simple docstring'''
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f'Difference between torch and flax is {diff} (>= {tol}).')
    def test_vision_text_dual_encoder_model(self):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        '''simple docstring'''
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
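# Note on the save/load checks above: outputs are compared within a 1e-5
# tolerance after a save_pretrained/from_pretrained round trip; serialization
# should be exact, but TF graph re-tracing can introduce tiny numeric noise,
# hence the tolerance (an assumption about the test's intent).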
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        '''simple docstring'''
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class _snake_case ( lowercase_ , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
'''simple docstring'''
snake_case_ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
snake_case_ = 13
snake_case_ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case_ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case_ = random_attention_mask([batch_size, 4] )
snake_case_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
    def check_vision_text_output_attention(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
snake_case_ = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case_ = to_atuple(vision_model.config.image_size )
snake_case_ = to_atuple(vision_model.config.patch_size )
snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case_ = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case_ = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model( self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = TFDeiTModel(vision_config , name="vision_model" )
        text_model = TFRobertaModel(text_config , name="text_model" )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
'''simple docstring'''
snake_case_ = TFDeiTModelTester(self )
snake_case_ = TFRobertaModelTester(self )
snake_case_ = vit_model_tester.prepare_config_and_inputs()
snake_case_ = bert_model_tester.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = vision_config_and_inputs
        snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest( lowercase_ , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
'''simple docstring'''
snake_case_ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
snake_case_ = 13
snake_case_ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case_ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case_ = random_attention_mask([batch_size, 4] )
snake_case_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = TFCLIPVisionModel(vision_config , name="vision_model" )
        text_model = TFBertModel(text_config , name="text_model" )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
'''simple docstring'''
snake_case_ = TFCLIPVisionModelTester(self )
snake_case_ = TFBertModelTester(self )
snake_case_ = clip_model_tester.prepare_config_and_inputs()
snake_case_ = bert_model_tester.prepare_config_and_inputs()
snake_case_ , snake_case_ = vision_config_and_inputs
        snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=a__ )
snake_case_ = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
snake_case_ = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=a__ , padding=a__ , return_tensors="np" )
snake_case_ = model(**a__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
snake_case_ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a__ , atol=1e-3 ) )
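        # Note (added): `logits_per_image` has shape (num_images, num_texts) while
        # `logits_per_text` is its transpose with shape (num_texts, num_images);
        # that is exactly what the two shape assertions above encode.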
| 92 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Optional[int] = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation : str ):
    '''simple docstring'''
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
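# Worked trace (illustrative): for "(2 + (3 * 4))" the scan pushes 2, "+", 3,
# "*", 4; the first ")" pops "*" with operands 3 and 4 and pushes 12, the
# second ")" pops "+" with operands 2 and 12 and pushes 14, which RULE 5
# returns. Opening parentheses are simply skipped (RULE 3).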
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 92 | 1 |
def find_minimum_change( denominations , value ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations ):
        # Find denominations
        while total_value >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 280 |
import colorsys
from PIL import Image # type: ignore
def get_distance( x , y , max_step ) -> float:
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
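# Sanity check (derived from the loop above): the origin never diverges, so
# get_distance(0, 0, 50) runs all 50 steps and returns 49 / 49 == 1.0, while a
# clearly divergent point such as (2, 2) breaks at step 0 and returns 0.0.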
def get_black_and_white_rgb( distance ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def get_color_coded_rgb( distance ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
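# For example, colorsys.hsv_to_rgb(0.5, 1, 1) is (0.0, 1.0, 1.0), so
# get_color_coded_rgb(0.5) maps the half-way distance to cyan, (0, 255, 255).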
def get_image(
    image_width = 8_00 ,
    image_height = 6_00 ,
    figure_center_x = -0.6 ,
    figure_center_y = 0 ,
    figure_width = 3.2 ,
    max_step = 50 ,
    use_distance_color_coding = True ,
) -> Image.Image:
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 280 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _snake_case :
'''simple docstring'''
pass
| 357 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs( graph ):
    '''simple docstring'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
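# A triangle (odd cycle) is the canonical non-bipartite case; the final colour
# check above rejects it. Hypothetical second input, added for contrast:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False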
| 59 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone( PreTrainedModel , BackboneMixin ):
'''simple docstring'''
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
def __init__( self : List[Any] ,A : Optional[int] ,**A : Union[str, Any] ):
requires_backends(self ,"timm" )
super().__init__(snake_case__ )
__A = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(snake_case__ ,"out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
__A = getattr(snake_case__ ,"use_pretrained_backbone" ,snake_case__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
__A = config.out_indices if getattr(snake_case__ ,"out_indices" ,snake_case__ ) is not None else (-1,)
__A = timm.create_model(
config.backbone ,pretrained=snake_case__ ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=snake_case__ ,**snake_case__ ,)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__A = self._backbone.return_layers
__A = {layer['''module''']: str(snake_case__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(snake_case__ )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
requires_backends(cls ,["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
__A = kwargs.pop("config" ,TimmBackboneConfig() )
__A = kwargs.pop("use_timm_backbone" ,snake_case__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
__A = kwargs.pop("num_channels" ,config.num_channels )
__A = kwargs.pop("features_only" ,config.features_only )
__A = kwargs.pop("use_pretrained_backbone" ,config.use_pretrained_backbone )
__A = kwargs.pop("out_indices" ,config.out_indices )
__A = TimmBackboneConfig(
backbone=snake_case__ ,num_channels=snake_case__ ,features_only=snake_case__ ,use_pretrained_backbone=snake_case__ ,out_indices=snake_case__ ,)
return super()._from_config(snake_case__ ,**snake_case__ )
    def _init_weights( self , module ):
        pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__A = self._all_layers
__A = self._backbone(snake_case__ ,**snake_case__ )
__A = self._return_layers
__A = tuple(hidden_states[i] for i in self.out_indices )
else:
__A = self._backbone(snake_case__ ,**snake_case__ )
__A = None
__A = tuple(snake_case__ )
__A = tuple(snake_case__ ) if hidden_states is not None else None
if not return_dict:
__A = (feature_maps,)
if output_hidden_states:
__A = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=snake_case__ ,hidden_states=snake_case__ ,attentions=snake_case__ )
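# A minimal usage sketch (assumptions: `timm` is installed and the "resnet18"
# checkpoint resolves; the lines below are illustrative, not from this file):
#
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#
# `feature_maps` is the tuple of stage outputs selected by `out_indices`.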
| 15 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__snake_case = parser.parse_args()
__snake_case = '''cpu'''
__snake_case = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__snake_case = '''path-to-your-trained-model'''
__snake_case = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__snake_case = pipe.to(device)
# to channels last
__snake_case = pipe.unet.to(memory_format=torch.channels_last)
__snake_case = pipe.vae.to(memory_format=torch.channels_last)
__snake_case = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__snake_case = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__snake_case = torch.randn(2, 4, 64, 64)
__snake_case = torch.rand(1) * 9_99
__snake_case = torch.randn(2, 77, 7_68)
__snake_case = (sample, timestep, encoder_hidden_status)
try:
__snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__snake_case = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__snake_case = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__snake_case = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__snake_case = 6_66
__snake_case = torch.Generator(device).manual_seed(seed)
__snake_case = {'''generator''': generator}
if args.steps is not None:
__snake_case = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__snake_case = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
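# Note (added): `torch.channels_last` stores NCHW tensors in NHWC memory
# order, which is the layout IPEX's fused convolution kernels prefer; doing
# the conversion once up front avoids per-operator layout reshuffling later.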
| 348 | 0 |
def power(base , exponent ):
    """simple docstring"""
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
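# Example traced through the recursion above: power(3, 4) expands to
# 3 * 3 * 3 * 3 * 1 == 81; a negative exponent such as -2 is handled by the
# driver's 1 / result branch, giving power(3, 2) == 9 and then 1 / 9.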
| 213 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return self.time_input_dim
@property
def a_ ( self ):
return self.time_input_dim * 4
@property
def a_ ( self ):
return 1_0_0
@property
def a_ ( self ):
snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case = UnCLIPTextProjModel(**__snake_case )
return model
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def a_ ( self ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def a_ ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def a_ ( self ):
snake_case = self.dummy_decoder
snake_case = self.dummy_text_proj
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_super_res_first
snake_case = self.dummy_super_res_last
snake_case = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
snake_case = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def a_ ( self , __snake_case , __snake_case=0 , __snake_case=True ):
snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith('''mps''' ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
if pil_image:
snake_case = input_image * 0.5 + 0.5
snake_case = input_image.clamp(0 , 1 )
snake_case = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
snake_case = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = torch.device('''cpu''' )
        class DummyScheduler:
            """simple docstring"""
            init_noise_sigma = 1
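        # Note (added): `prepare_latents` appears to read only `init_noise_sigma`
        # from its scheduler argument, so this one-attribute stub is enough to
        # pre-compute fixed decoder and super-resolution latents.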
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device=__snake_case ).manual_seed(0 )
snake_case = pipe.decoder.dtype
snake_case = 1
snake_case = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case ).images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
# Don't pass image, instead pass embedding
snake_case = pipeline_inputs.pop('''image''' )
snake_case = pipe.image_encoder(__snake_case ).image_embeds
snake_case = pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case , image_embeddings=__snake_case , ).images
        # make sure passing image embeddings manually is identical
        assert np.abs(img_out_a - img_out_b ).max() < 1E-4
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__snake_case , expected_max_diff=__snake_case )
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
snake_case = True
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__snake_case , relax_max_difference=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
def a_ ( self ):
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__snake_case )
@skip_mps
def a_ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a_ ( self ):
return super().test_save_load_local()
@skip_mps
def a_ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ):
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
snake_case = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case = pipeline(
__snake_case , generator=__snake_case , output_type='''np''' , )
snake_case = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(__snake_case , __snake_case , 1_5 )
| 213 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester( unittest.TestCase):
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=1_3 , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Dict=9_9 , __lowerCAmelCase : Any=3_2 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : str=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Dict=5_1_2 , __lowerCAmelCase : Optional[int]=1_6 , __lowerCAmelCase : str=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : List[str]=4 , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Optional[Any] = is_training
_lowerCamelCase : Dict = use_attention_mask
_lowerCamelCase : Dict = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = type_sequence_label_size
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Dict = num_choices
    def prepare_config_and_inputs( self ):
"""simple docstring"""
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Any = None
if self.use_attention_mask:
_lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Dict = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest( FlaxModelTesterMixin , unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxBertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = FlaxBertModel.from_pretrained('''bert-base-cased''' )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 72 |
class Graph:
    '''simple docstring'''
    def __init__(self ):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self ):
        """Boruvka's algorithm requires pairwise-distinct edge weights."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self ):
        """simple docstring"""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("""\n""" )
    def get_edges(self ):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices(self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None , edges=None ):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind:
        '''simple docstring'''
        def __init__(self ):
            """simple docstring"""
            self.parent = {}
            self.rank = {}
        def __len__(self ):
            """simple docstring"""
            return len(self.parent )
        def make_set(self , item ):
            """simple docstring"""
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find(self , item ):
            """simple docstring"""
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union(self , itema , itemb ):
            """simple docstring"""
            roota = self.find(itema )
            rootb = self.find(itemb )
            if roota == rootb:
                return roota
            if self.rank[roota] > self.rank[rootb]:
                self.parent[rootb] = roota
                return roota
            if self.rank[roota] < self.rank[rootb]:
                self.parent[roota] = rootb
                return rootb
            if self.rank[roota] == self.rank[rootb]:
                self.rank[roota] += 1
                self.parent[rootb] = roota
                return roota
            return None
    @staticmethod
    def boruvka_mst(graph ):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                seta = union_find.find(head )
                setb = union_find.find(tail )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
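# Hypothetical smoke test (edge list chosen by hand; not part of the original file):
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
    mst = Graph.boruvka_mst(g)
    print(mst)  # the three cheapest edges survive: total weight 1 + 2 + 3 == 6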
| 305 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( PretrainedConfig):
    model_type = 'levit'
def __init__( self: int , UpperCamelCase_: List[Any]=2_24 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: int=3 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Optional[int]=1 , UpperCamelCase_: List[str]=16 , UpperCamelCase_: Tuple=[1_28, 2_56, 3_84] , UpperCamelCase_: List[str]=[4, 8, 12] , UpperCamelCase_: Optional[int]=[4, 4, 4] , UpperCamelCase_: Union[str, Any]=[16, 16, 16] , UpperCamelCase_: int=0 , UpperCamelCase_: Union[str, Any]=[2, 2, 2] , UpperCamelCase_: Optional[Any]=[2, 2, 2] , UpperCamelCase_: Optional[Any]=0.02 , **UpperCamelCase_: List[Any] , ):
super().__init__(**_lowerCamelCase )
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = kernel_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = hidden_sizes
__lowerCamelCase = num_attention_heads
__lowerCamelCase = depths
__lowerCamelCase = key_dim
__lowerCamelCase = drop_path_rate
__lowerCamelCase = patch_size
__lowerCamelCase = attention_ratio
__lowerCamelCase = mlp_ratio
__lowerCamelCase = initializer_range
__lowerCamelCase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class LevitOnnxConfig( OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
@property
    def inputs( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ):
return 1E-4
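# Sketch of how this ONNX config is typically consumed (the export helper and
# its exact signature are assumptions based on the legacy `transformers.onnx` API):
#
#   from pathlib import Path
#   from transformers.onnx import export
#   onnx_inputs, onnx_outputs = export(
#       preprocessor, model, LevitOnnxConfig(model.config), opset=13,
#       output=Path("levit.onnx"))
#
# `inputs` above fixes the input name and dynamic axes, and
# `atol_for_validation` (1e-4) bounds the allowed PyTorch-vs-ONNX drift.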
| 354 |
import requests
from bs4 import BeautifulSoup
def world_covid19_stats( url : str = "https://www.worldometers.info/coronavirus" ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    keys = soup.findAll("""h1""" )
    values = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
    keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
    values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""")
| 29 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc( doc_list ) -> list:
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"""{duplicate_key} is present several times in the documentation table of content at """
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    new_doc = sorted(new_doc, key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
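# Illustrative behaviour (hand-built input, not from the repo): duplicates of
# one "local" key collapse to a single entry, "overview" is pinned first, and
# the remainder is sorted by lower-cased title:
#
#   clean_doc_toc([
#       {"local": "a", "title": "Zeta"},
#       {"local": "a", "title": "Zeta"},
#       {"local": "b", "title": "overview"},
#   ])
#   -> [{"local": "b", "title": "overview"}, {"local": "a", "title": "Zeta"}]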
def check_scheduler_doc( overwrite=False ):
    with open(PATH_TO_TOC, encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['''sections''']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['''sections'''] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def check_pipeline_doc( overwrite=False ):
    with open(PATH_TO_TOC, encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['''sections''']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['''section''']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['''section'''] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['''sections'''] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 125 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput( BaseOutput ):
    '''simple docstring'''
    sample: torch.FloatTensor
class Encoder( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[Any]=("DownEncoderBlock2D",) , UpperCamelCase__ : Optional[Any]=(64,) , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Optional[Any]="silu" , UpperCamelCase__ : List[str]=True , ) -> str:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
# down
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = 2 * out_channels if double_z else out_channels
__magic_name__ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
    def forward( self , x ):
"""simple docstring"""
__magic_name__ = x
__magic_name__ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : int ):
def custom_forward(*UpperCamelCase__ : str ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
__magic_name__ = down_block(UpperCamelCase__ )
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ )
# post-process
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
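# Note (added): `torch.utils.checkpoint.checkpoint` discards each wrapped
# block's intermediate activations and recomputes them during backward,
# trading extra compute for a smaller activation-memory footprint; the
# `use_reentrant` keyword only exists on torch >= 1.11, which is why the
# encoder and decoder branch on `is_torch_version` above.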
class Decoder( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
__magic_name__ = in_channels if norm_type == """spatial""" else None
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
__magic_name__ = list(reversed(UpperCamelCase__ ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
__magic_name__ = output_channel
# out
if norm_type == "spatial":
__magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
    def forward( self , z , latent_embeds=None ):
"""simple docstring"""
__magic_name__ = z
__magic_name__ = self.conv_in(UpperCamelCase__ )
__magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Optional[int] ):
def custom_forward(*UpperCamelCase__ : int ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
else:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class VectorQuantizer( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__magic_name__ = n_e
__magic_name__ = vq_embed_dim
__magic_name__ = beta
__magic_name__ = legacy
__magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__magic_name__ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
__magic_name__ = self.used.shape[0]
__magic_name__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__magic_name__ = self.re_embed
__magic_name__ = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
__magic_name__ = n_e
__magic_name__ = sane_index_shape
    def remap_to_used( self , inds ):
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
__magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
__magic_name__ = match.argmax(-1 )
__magic_name__ = match.sum(2 ) < 1
if self.unknown_index == "random":
__magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__magic_name__ = self.unknown_index
return new.reshape(UpperCamelCase__ )
    def unmap_to_all( self , inds ):
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
__magic_name__ = 0 # simply set to zero
__magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
__magic_name__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__magic_name__ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
__magic_name__ = self.embedding(UpperCamelCase__ ).view(z.shape )
__magic_name__ = None
__magic_name__ = None
# compute loss for embedding
if not self.legacy:
__magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__magic_name__ = z + (z_q - z).detach()
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__magic_name__ = self.remap_to_used(UpperCamelCase__ )
__magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
if self.remap is not None:
__magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
__magic_name__ = self.unmap_to_all(UpperCamelCase__ )
__magic_name__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__magic_name__ = self.embedding(UpperCamelCase__ )
if shape is not None:
__magic_name__ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
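
# Straight-through estimator sketch: the line z_q = z + (z_q - z).detach() above keeps
# z_q's value while routing gradients straight to the encoder output z. A minimal check
# with torch.round standing in for the codebook lookup (not part of the class above):
import torch

_z = torch.randn(3, requires_grad=True)
_z_q_raw = torch.round(_z)  # non-differentiable quantization stand-in
_z_q = _z + (_z_q_raw - _z).detach()
_z_q.sum().backward()
assert torch.allclose(_z.grad, torch.ones_like(_z))  # gradient passes straight through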
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
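
# A minimal sketch checking the closed-form KL used in kl() above against
# torch.distributions (plain torch only; randn_tensor is not needed for this check):
import torch

_mean = torch.zeros(1, 4, 4, 4) + 0.5
_logvar = torch.zeros(1, 4, 4, 4)
_var = _logvar.exp()
# closed form from kl(): 0.5 * sum(mu^2 + var - 1 - logvar) against N(0, I)
_kl_closed = 0.5 * torch.sum(_mean**2 + _var - 1.0 - _logvar, dim=[1, 2, 3])
_q = torch.distributions.Normal(_mean, _var.sqrt())
_p = torch.distributions.Normal(torch.zeros_like(_mean), torch.ones_like(_mean))
_kl_ref = torch.distributions.kl_divergence(_q, _p).sum(dim=[1, 2, 3])
assert torch.allclose(_kl_closed, _kl_ref, atol=1e-5)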
| 88 | 0 |
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost occurrence of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the index of the rightmost mismatch for the window at current_pos, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
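
# The match_in_pattern scan above is O(patLen) per mismatch. A common refinement is to
# precompute the rightmost index of every pattern character once; a minimal dict-based
# sketch (helper name is illustrative, not part of the class above):
def build_last_occurrence(pattern: str) -> dict:
    # later assignments win, so each char maps to its rightmost index in the pattern
    return {char: i for i, char in enumerate(pattern)}


_table = build_last_occurrence("AB")
assert _table == {"A": 0, "B": 1}
assert _table.get("C", -1) == -1  # characters absent from the pattern shift the window fully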
| 174 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
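
# A short usage sketch (commented because it needs a full transformers install; the
# constructor arguments here are illustrative, values mirror the defaults above):
# config = CamembertConfig(vocab_size=32005, hidden_size=768)
# onnx_config = CamembertOnnxConfig(config, task="default")
# print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes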
| 174 | 1 |
_A = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ = input("Enter message: " )
lowerCAmelCase_ = input("Enter key [alphanumeric]: " )
lowerCAmelCase_ = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
lowerCAmelCase_ = """encrypt"""
lowerCAmelCase_ = encrypt_message(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif mode.lower().startswith("d" ):
lowerCAmelCase_ = """decrypt"""
lowerCAmelCase_ = decrypt_message(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(F"""\n{mode.title()}ed message:""" )
print(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
"""simple docstring"""
return translate_message(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encrypt" )
def lowerCamelCase__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return translate_message(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decrypt" )
def lowerCamelCase__ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
lowerCAmelCase_ = []
lowerCAmelCase_ = 0
lowerCAmelCase_ = key.upper()
for symbol in message:
lowerCAmelCase_ = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(SCREAMING_SNAKE_CASE__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase_ = 0
else:
translated.append(SCREAMING_SNAKE_CASE__ )
return "".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 231 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )

    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)

    # Synchronize the stream and take time
    stream.synchronize()

    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f' Num examples = {len(eval_dataset)}')
logger.info(f' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))

    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'Evaluation metrics: {eval_metric}')
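
    # When profiling TRT engines it is often more informative to report percentiles than
    # the mean; a minimal nearest-rank sketch (per-batch timings list is hypothetical,
    # not collected by the script above):
    def latency_percentiles(times_ms, percentiles=(50, 90, 99)):
        s = sorted(times_ms)
        return {p: s[min(len(s) - 1, int(len(s) * p / 100))] for p in percentiles}

    assert latency_percentiles([1.0, 2.0, 3.0, 4.0])[50] == 3.0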
| 111 | 0 |
class MaxFenwickTree:
    """A Fenwick tree specialised for range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
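
# Usage sketch for the tree above (0-indexed updates, exclusive right bound in query):
_ft = MaxFenwickTree(5)
_ft.update(2, 10)
_ft.update(4, 7)
assert _ft.query(0, 5) == 10
assert _ft.query(3, 5) == 7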
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 352 |
"""simple docstring"""
import os
import string
import sys
lowercase__ = 1 << 8
lowercase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
lowercase__ = KEYMAP["""up"""]
lowercase__ = KEYMAP["""left"""]
if sys.platform == "win32":
lowercase__ = []
lowercase__ = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
lowercase__ = ord(str(i))
def _snake_case ( ):
if os.name == "nt":
import msvcrt
_lowerCamelCase : Any = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(lowercase__ ) == 0:
# Read the keystroke
_lowerCamelCase : str = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : List[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(lowercase__ )
if ord(lowercase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : List[Any] = chr(KEYMAP['esc'] )
except KeyError:
_lowerCamelCase : int = cha[1]
else:
_lowerCamelCase : Optional[int] = ch.decode(lowercase__ )
else:
_lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : List[str] = sys.stdin.fileno()
_lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ )
try:
tty.setraw(lowercase__ )
_lowerCamelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ )
return ch
def _snake_case ( ):
_lowerCamelCase : int = get_raw_chars()
if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(lowercase__ ) == KEYMAP["esc"]:
_lowerCamelCase : Union[str, Any] = get_raw_chars()
if ord(lowercase__ ) == KEYMAP["mod_int"]:
_lowerCamelCase : List[Any] = get_raw_chars()
if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(lowercase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 12 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase = 1_00 ) -> int:
lowercase__ : Optional[Any] = n * (n + 1) * (2 * n + 1) / 6
lowercase__ : List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
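
# The closed forms used above: sum of the first n naturals is n(n+1)/2 and the sum of
# their squares is n(n+1)(2n+1)/6. Cross-check against brute force for small n:
for _n in range(1, 50):
    _brute = sum(range(1, _n + 1)) ** 2 - sum(k * k for k in range(1, _n + 1))
    assert solution(_n) == _brute
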
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
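
# The rescale/normalize steps above compose to (pixel / 255 - mean) / std per channel;
# a small numpy check of that composition (values arbitrary, single channel):
import numpy as np

_pixels = np.array([0.0, 127.5, 255.0])
_mean, _std = 0.5, 0.5  # IMAGENET_STANDARD values
_out = (_pixels * (1 / 255) - _mean) / _std
assert np.allclose(_out, [-1.0, 0.0, 1.0])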
| 217 | 0 |
"""simple docstring"""
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = 1
while len(SCREAMING_SNAKE_CASE_ ) < 1E6:
constant.append(str(SCREAMING_SNAKE_CASE_ ) )
i += 1
_UpperCAmelCase = ''''''.join(SCREAMING_SNAKE_CASE_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
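
# Sanity check on a small prefix of the constant: the first digits are "1234567891011...",
# so the 9th digit (0-indexed [8]) is "9" and the 10th ([9]) is the "1" of 10:
_prefix = "".join(str(i) for i in range(1, 20))
assert _prefix[8] == "9" and _prefix[9] == "1"
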
if __name__ == "__main__":
print(solution())
| 366 |
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap
'''simple docstring'''
_UpperCAmelCase = index
_UpperCAmelCase = 2 * index + 1 # Left Node
_UpperCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCAmelCase = right_index
if largest != index:
_UpperCAmelCase , _UpperCAmelCase = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_UpperCAmelCase , _UpperCAmelCase = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = low
_UpperCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCAmelCase , _UpperCAmelCase = array[j], array[i]
i += 1
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
_UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
_UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
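
# Quick property check for sort() above against Python's built-in sorted() (threshold 16
# and depth 2*log2(n) mirror the classic introsort parameters used in sort()):
import random

_data = [random.randint(-100, 100) for _ in range(200)]
assert sort(list(_data)) == sorted(_data)
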
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 326 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
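
# The special-token layout produced by build_inputs_with_special_tokens above is the
# BART/CamemBERT pair format: <s> A </s> </s> B </s>. A plain-list illustration with
# made-up token ids (0 = <s>, 2 = </s>; the sequences are arbitrary):
_cls_id, _sep_id = 0, 2
_seq_a, _seq_b = [10, 11], [20]
_single = [_cls_id] + _seq_a + [_sep_id]
_pair = [_cls_id] + _seq_a + [_sep_id] + [_sep_id] + _seq_b + [_sep_id]
assert _single == [0, 10, 11, 2]
assert _pair == [0, 10, 11, 2, 2, 20, 2]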
| 124 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 124 | 1 |
def odd_even_sort(input_list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
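
# Worked example (illustration only, not part of the original module): one sweep
# over [3, 1, 4, 2] does an even pass (swap 3,1 and 4,2 -> [1, 3, 2, 4]) and an
# odd pass (swap 3,2 -> [1, 2, 3, 4]); the next sweep makes no swaps and exits.
assert odd_even_sort([3, 1, 4, 2]) == [1, 2, 3, 4]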
if __name__ == "__main__":
print("""Enter list to be sorted""")
lowerCamelCase_ : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCamelCase_ : Dict = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 223 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
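
# Illustrative example (assumed input, not from the original file): extra spaces
# go to the leftmost gaps first, and the final line is left-justified and padded.
assert text_justification("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]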
if __name__ == "__main__":
from doctest import testmod
testmod()
| 223 | 1 |
"""simple docstring"""
def _modexpt(base, exponent, modulo_value):
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base=1777, height=1855, digits=8):
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
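
# Sanity check (illustration only): _modexpt is square-and-multiply modular
# exponentiation, so it must agree with Python's built-in three-argument pow.
assert _modexpt(2, 10, 1_000) == pow(2, 10, 1_000) == 24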
if __name__ == "__main__":
print(F"{solution() = }") | 96 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    '''simple docstring'''
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        '''simple docstring'''
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[ControlNetOutput, Tuple]:
        '''simple docstring'''
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None, ):
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        '''simple docstring'''
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 282 | 0 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 358 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
return config
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 307 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        '''simple docstring'''
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
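
# Usage sketch (illustration only; the class name above is this dump's placeholder
# and `pil_image` is an assumed PIL.Image input):
# processor = lowerCAmelCase__(size={"shortest_edge": 256})
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # -> (1, 3, 224, 224) after the 224x224 center crop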
| 91 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
def __call__( self : Any , __lowerCamelCase : Dict ):
UpperCamelCase :Any = False
# Check that strings has a valid type
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[Any] = True
elif isinstance(__lowerCamelCase , (list, tuple) ):
if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ):
UpperCamelCase :Any = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
F"""but is of type {type(__lowerCamelCase )}.""" )
UpperCamelCase :str = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) )
if not is_batched:
UpperCamelCase :Any = [html_strings]
# Get nodes + xpaths
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :str = []
for html_string in html_strings:
UpperCamelCase , UpperCamelCase , UpperCamelCase :int = self.get_three_from_single(__lowerCamelCase )
nodes.append(__lowerCamelCase )
UpperCamelCase :int = []
for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = self.construct_xpath(__lowerCamelCase , __lowerCamelCase )
xpath_strings.append(__lowerCamelCase )
xpaths.append(__lowerCamelCase )
# return as Dict
UpperCamelCase :Optional[int] = {"""nodes""": nodes, """xpaths""": xpaths}
UpperCamelCase :Any = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
return encoded_inputs
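
# Usage sketch (illustration only; the class name above is this dump's placeholder
# for a MarkupLM-style HTML feature extractor):
# fe = _SCREAMING_SNAKE_CASE()
# encoding = fe("<html><body><p>Hello world</p></body></html>")
# encoding["nodes"]   # -> [["Hello world"]]
# encoding["xpaths"]  # -> [["/html/body/p"]]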
| 38 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def __a ( UpperCAmelCase="ro" , UpperCAmelCase="en" , UpperCAmelCase="wmt16" , UpperCAmelCase=None ) ->Tuple:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
A = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
A = datasets.load_dataset(a_ , a_ )
if save_dir is None:
A = f"""{dataset}-{pair}"""
A = Path(a_ )
save_dir.mkdir(exist_ok=a_ )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
A = "val" if split == "validation" else split
A = save_dir.joinpath(f"""{fn}.source""" )
A = save_dir.joinpath(f"""{fn}.target""" )
A = src_path.open("""w+""" )
A = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
A = x["translation"]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 367 |
'''simple docstring'''
import math
class Graph:
    '''simple docstring'''

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
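
# Worked example (illustration): with the edges registered below, floyd_warshall()
# relaxes every path through each intermediate node, so show_min(1, 4) yields
# 11 (1 -> 3 at weight 5, then 3 -> 4 at weight 6).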
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 191 |
'''simple docstring'''
UpperCAmelCase_ : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase_ : Any = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase_ : Tuple = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase_ : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase_ : Tuple = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase_ : Union[str, Any] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase_ : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase_ : int = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 200 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
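
# Worked value (illustration only): at x == mu the exponential term is 1, so the
# density reduces to 1 / sqrt(2 * pi * sigma**2), i.e. ~0.3989 for sigma = 1.
assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12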
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 371 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    '''simple docstring'''
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    '''simple docstring'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int, ) -> None:
    '''simple docstring'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
return result
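
# Illustration (not part of the original file): each new row is built from
# pairwise sums over a zero-padded copy of the previous row, e.g.
# [1, 2, 1] -> temp_row [0, 1, 2, 1, 0] -> half [1, 3] -> row [1, 3, 3, 1].
assert generate_pascal_triangle_optimized(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]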
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 8 | 0 |
import os
def largest_product(grid) -> int:
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lt_to_rb_diag_product = 0
    rt_to_lb_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lt_to_rb_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rt_to_lb_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lt_to_rb_diag_product, rt_to_lb_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
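
# Illustration (not part of the original file): every run of four adjacent cells
# is examined in all four directions, so a constant grid of ones yields 1.
assert largest_product([[1] * 4 for _ in range(4)]) == 1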
def solution() -> int:
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 196 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler], ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        '''simple docstring'''
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1_000
@torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True, ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        '''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device, )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator, )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images))

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        '''simple docstring'''
        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
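
# Note (illustration, not part of the original file): slerp interpolates two
# noise/latent tensors along the great circle between them, so slerp(x0, x1, 0.0)
# returns x0 and slerp(x0, x1, 1.0) returns x1, up to floating-point error.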
| 196 | 1 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    '''simple docstring'''
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
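
# Illustration (not part of the original file): for text "ABAABA" and pattern
# "AB" the pattern aligns at indices 0 and 3, which the demo below also prints.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]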
lowerCamelCase__ = "ABAABA"
lowerCamelCase__ = "AB"
lowerCamelCase__ = BoyerMooreSearch(text, pattern)
lowerCamelCase__ = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 310 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310 | 1 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None, ) -> None:
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )
    def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
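
# Minimal usage sketch (assumed values, not from this file): wrap a polynomial
# decay in a 1_000-step linear warmup.
# decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
#     initial_learning_rate=3e-5, decay_steps=10_000, end_learning_rate=0.0)
# lr_schedule = WarmUp(initial_learning_rate=3e-5, decay_schedule_fn=decay_fn, warmup_steps=1_000)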
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None, ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
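
# Example call (illustration only): a schedule that warms up over the first
# 1_000 of 10_000 steps and applies decoupled weight decay.
# optimizer, lr_schedule = create_optimizer(
#     init_lr=3e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01)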
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs, ) -> None:
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking, )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self) -> None:
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )

        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 249 |
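# Usage sketch (added for illustration, not part of the original file): gradient
# accumulation with the GradientAccumulator above. Gradients from `accum_steps`
# micro-batches are summed (the loss is pre-divided so the result is an average),
# applied once, then the accumulator is reset. The tiny model and random data
# below are illustrative stand-ins.
import tensorflow as tf

def _gradient_accumulation_demo() -> None:
    model = tf.keras.Sequential([tf.keras.layers.Dense(1 , input_shape=(4,) )] )
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1 )
    loss_fn = tf.keras.losses.MeanSquaredError()
    accumulator = GradientAccumulator()
    accum_steps = 4
    for step in range(8 ):
        x = tf.random.normal((2, 4) )
        y = tf.random.normal((2, 1) )
        with tf.GradientTape() as tape:
            loss = loss_fn(y , model(x , training=True ) ) / accum_steps
        accumulator(tape.gradient(loss , model.trainable_variables ) )
        if (step + 1) % accum_steps == 0:
            optimizer.apply_gradients(zip(accumulator.gradients , model.trainable_variables ) )
            accumulator.reset()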
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check( pkg : str , hint : str = None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
| 199 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class __lowerCamelCase ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a = StableDiffusionAttendAndExcitePipeline
a = False
a = TEXT_TO_IMAGE_PARAMS
a = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
a = TEXT_TO_IMAGE_IMAGE_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A ( cls : Dict):
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE)
@classmethod
def A ( cls : Union[str, Any]):
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE)
def A ( self : str):
torch.manual_seed(0)
_A : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE , )
_A : List[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
torch.manual_seed(0)
_A : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
_A : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
_A : str = CLIPTextModel(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_A : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str=0):
if str(SCREAMING_SNAKE_CASE).startswith('mps'):
_A : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE)
else:
_A : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE)
_A : Tuple = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def A ( self : Tuple):
_A : Optional[int] = 'cpu'
_A : Dict = self.get_dummy_components()
_A : int = self.pipeline_class(**SCREAMING_SNAKE_CASE)
pipe.to(SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
_A : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE).images
_A : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3))
_A : List[Any] = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496])
_A : str = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1e-3)
def A ( self : Any):
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)
def A ( self : Optional[int]):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def A ( self : List[Any]):
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4)
def A ( self : int):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def A ( self : Union[str, Any]):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)
def A ( self : Optional[int]):
super().test_save_load_local(expected_max_difference=5e-4)
def A ( self : int):
super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def A ( cls : Dict):
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE)
@classmethod
def A ( cls : Optional[Any]):
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE)
def A ( self : Any):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : int):
_A : Tuple = torch.manual_seed(51)
_A : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=SCREAMING_SNAKE_CASE , torch_dtype=torch.float16)
pipe.to('cuda')
_A : Optional[Any] = 'a painting of an elephant with glasses'
_A : Any = [5, 7]
_A : Tuple = pipe(
prompt=SCREAMING_SNAKE_CASE , token_indices=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
_A : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
assert np.abs((expected_image - image).max()) < 5e-1
| 227 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 227 | 1 |
from __future__ import annotations
def fractional_knapsack( value: list[float] , weight: list[float] , capacity: float ) -> tuple[float, list[float]]:
    # Rank item indices by value-to-weight ratio, best first.
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            # Take the whole item.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 189 |
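# Worked example (added): items with values [60, 100, 120] and weights
# [10, 20, 30] under capacity 50. The greedy ratio order takes items 0 and 1
# whole and two-thirds of item 2, for a maximum value of 240.
def _fractional_knapsack_demo() -> None:
    max_value, fractions = fractional_knapsack([60.0, 100.0, 120.0] , [10.0, 20.0, 30.0] , 50.0 )
    assert max_value == 240.0
    assert fractions == [1, 1, 20 / 30]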
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint ( ABC ):
'''simple docstring'''
def __init__( self : Union[str, Any] ):
"""simple docstring"""
self.test()
    def test( self ):
        """simple docstring"""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
            stepped, completed, reset = self.update(advance )
            counter += 1
            if counter > 1_00_00:
                raise Exception("""update() does not fulfill the constraint.""" )
        if self.remaining() != 0:
            raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
    def advance( self ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
    def does_advance( self , token_id : int ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
    def update( self , token_id : int ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
    def reset( self ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
    def remaining( self ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
    def copy( self , stateful=False ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint ( Constraint ):
    '''simple docstring'''
    def __init__( self , token_ids : List[int] ):
        """simple docstring"""
        super(Constraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance( self ):
        """simple docstring"""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance( self , token_id : int ):
        """simple docstring"""
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update( self , token_id : int ):
        """simple docstring"""
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset( self ):
        """simple docstring"""
        self.completed = False
        self.fulfilled_idx = 0

    def remaining( self ):
        """simple docstring"""
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy( self , stateful=False ):
        """simple docstring"""
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie :
    '''simple docstring'''
    def __init__( self , nested_token_ids : List[List[int]] , no_subsets : bool = True ):
        """simple docstring"""
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                """Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
                f''' {nested_token_ids}.''' )
        self.trie = root
    def next_tokens( self , current_seq ):
        """simple docstring"""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf( self , current_seq ):
        """simple docstring"""
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves( self , root ):
        """simple docstring"""
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets( self , trie , nested_token_ids ):
        """simple docstring"""
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
class DisjunctiveConstraint ( Constraint ):
    '''simple docstring'''
    def __init__( self , nested_token_ids : List[List[int]] ):
        """simple docstring"""
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance( self ):
        """simple docstring"""
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def does_advance( self , token_id : int ):
        """simple docstring"""
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens

    def update( self , token_id : int ):
        """simple docstring"""
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset

    def reset( self ):
        """simple docstring"""
        self.completed = False
        self.current_seq = []

    def remaining( self ):
        """simple docstring"""
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def copy( self , stateful=False ):
        """simple docstring"""
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState :
    '''simple docstring'''
    def __init__( self , constraints : List[Constraint] ):
        """simple docstring"""
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()

    def init_state( self ):
        """simple docstring"""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]

    def get_bank( self ):
        """simple docstring"""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add

    def advance( self ):
        """simple docstring"""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def reset( self , token_ids : Optional[List[int]] ):
        """simple docstring"""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add( self , token_id : int ):
        """simple docstring"""
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped, complete, reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            """`constraint.update(token_id)` is not yielding incremental progress, """
                            """even though `constraint.does_advance(token_id)` is true.""" )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy( self , stateful=True ):
        """simple docstring"""
        new_state = ConstraintListState(self.constraints )  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 346 | 0 |
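# Illustrative walkthrough (added): driving the PhrasalConstraint above one
# token id at a time, the way a constrained beam search would. `advance()`
# proposes the next required id; `update()` reports (stepped, completed, reset).
# The ids [5, 9, 2] are arbitrary placeholders, not real vocabulary entries.
def _phrasal_constraint_demo() -> None:
    constraint = PhrasalConstraint([5, 9, 2] )
    completed = False
    for token_id in [5, 9, 2]:
        assert constraint.does_advance(token_id )
        stepped, completed, reset = constraint.update(token_id )
        assert stepped and not reset
    assert completed and constraint.remaining() == 0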
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = """__dummy_dataset1__"""
DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir ( dataset_loading_script_name : str , dataset_loading_script_code : str , tmp_path ):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / '''datasets''' / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f"""{script_name}.py"""
    with open(script_path , '''w''' ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
| 352 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages : List[str]
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="Translation" , init=False , repr=False )
def __call__( self : Union[str, Any] )-> str:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten( self )-> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
    languages : Optional[List] = None
    num_languages : Optional[int] = None
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="TranslationVariableLanguages" , init=False , repr=False )
    def __post_init__( self )-> None:
        """simple docstring"""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self : int )-> Optional[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example( self , translation_dict )-> Dict:
        """simple docstring"""
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(sorted(lang_set ) )}).' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
    def flatten( self )-> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 91 | 0 |
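# Illustrative example (added): encoding one example with the
# TranslationVariableLanguages feature above. Language/translation pairs are
# flattened into parallel tuples sorted by language code.
def _translation_feature_demo() -> None:
    feature = TranslationVariableLanguages(languages=['en', 'fr'] )
    encoded = feature.encode_example({'fr': 'le chat', 'en': 'the cat'} )
    assert encoded == {'language': ('en', 'fr'), 'translation': ('the cat', 'le chat')}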
'''simple docstring'''
from math import sqrt


def sum_of_divisors( n : int ) -> int:
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n


def solution( limit : int = 1_0000 ) -> int:
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
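# Worked check (added): 220 and 284 form the smallest amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220; solution(300)
# therefore counts both and returns 504.
def _amicable_demo() -> None:
    assert sum_of_divisors(220 ) == 284
    assert sum_of_divisors(284 ) == 220
    assert solution(300 ) == 220 + 284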
| 63 |
"""simple docstring"""
from __future__ import annotations
solution: list[list[int]] = []
def is_safe( board: list[list[int]] , row: int , column: int ) -> bool:
    # Check this row.
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    # Check this column.
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    # Check the upper-left diagonal.
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    # Check the upper-right diagonal.
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve( board: list[list[int]] , row: int ) -> bool:
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard( board: list[list[int]] ) -> None:
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print('Q' , end=' ' )
            else:
                print('.' , end=' ' )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 213 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class UpperCAmelCase_ ( TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = 8
# DPR tok
__SCREAMING_SNAKE_CASE = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(_A , exist_ok=_A )
__SCREAMING_SNAKE_CASE = os.path.join(_A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__SCREAMING_SNAKE_CASE = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__SCREAMING_SNAKE_CASE = dict(zip(_A , range(len(_A ) ) ) )
__SCREAMING_SNAKE_CASE = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(_A , exist_ok=_A )
__SCREAMING_SNAKE_CASE = os.path.join(_A , BART_VOCAB_FILES_NAMES['vocab_file'] )
__SCREAMING_SNAKE_CASE = os.path.join(_A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
def _A ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _A ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _A ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def _A ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_dataset()
__SCREAMING_SNAKE_CASE = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__SCREAMING_SNAKE_CASE = dataset
__SCREAMING_SNAKE_CASE = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_dataset()
__SCREAMING_SNAKE_CASE = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'dataset' )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
__SCREAMING_SNAKE_CASE = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__SCREAMING_SNAKE_CASE = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _A ) , )
return retriever
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
__SCREAMING_SNAKE_CASE = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(_A , open(_A , 'wb' ) )
__SCREAMING_SNAKE_CASE = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
__SCREAMING_SNAKE_CASE = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = self.get_dummy_canonical_hf_index_retriever()
__SCREAMING_SNAKE_CASE = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__SCREAMING_SNAKE_CASE = self.get_dummy_dataset()
retriever.save_pretrained(_A )
__SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
__SCREAMING_SNAKE_CASE = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
__SCREAMING_SNAKE_CASE = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
__SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
__SCREAMING_SNAKE_CASE = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
__SCREAMING_SNAKE_CASE = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , _A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
__SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
__SCREAMING_SNAKE_CASE = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = self.get_dummy_legacy_index_retriever()
__SCREAMING_SNAKE_CASE = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , _A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
__SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
__SCREAMING_SNAKE_CASE = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _A ( self ):
'''simple docstring'''
import torch
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = self.get_dummy_canonical_hf_index_retriever()
__SCREAMING_SNAKE_CASE = [[5, 7], [10, 11]]
__SCREAMING_SNAKE_CASE = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE = retriever(_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_A , _A )
self.assertIsInstance(_A , _A )
self.assertIsInstance(_A , np.ndarray )
__SCREAMING_SNAKE_CASE = retriever(
_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A , return_tensors='pt' , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_A , torch.Tensor )
self.assertIsInstance(_A , torch.Tensor )
self.assertIsInstance(_A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dpr_ctx_encoder_tokenizer()
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
retriever.set_ctx_encoder_tokenizer(_A )
__SCREAMING_SNAKE_CASE = [[5, 7], [10, 11]]
__SCREAMING_SNAKE_CASE = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__SCREAMING_SNAKE_CASE = retriever(_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A )
self.assertEqual(
len(_A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , _A ) # check for doc token related keys in dictionary.
| 118 |
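# Minimal sketch (added, not from the test file): the retrieval pattern the
# tests above exercise — attach a FAISS inner-product index to an in-memory
# `datasets.Dataset` and query it. The 8-dim toy vectors mirror
# retrieval_vector_size in the tests; this reuses the imports at the top of
# that file (Dataset, np, faiss).
def _faiss_retrieval_demo() -> None:
    dataset = Dataset.from_dict(
        {
            "id": ["0", "1"],
            "text": ["foo", "bar"],
            "embeddings": [np.ones(8 ), 2 * np.ones(8 )],
        } )
    dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
    scores, examples = dataset.get_nearest_examples("embeddings" , np.ones(8 , dtype=np.float32 ) , k=1 )
    assert examples["id"] == ["1"]  # inner product favours the larger vector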
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = LayoutLMTokenizer
UpperCamelCase__ : Any = LayoutLMTokenizerFast
UpperCamelCase__ : Optional[int] = True
UpperCamelCase__ : int = True
def _A ( self ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _A ( self , **_A ):
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_A )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'UNwant\u00E9d,running'
__SCREAMING_SNAKE_CASE = 'unwanted, running'
return input_text, output_text
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [7, 4, 5, 10, 8, 9] )
def _A ( self ):
'''simple docstring'''
pass
| 118 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool ( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode( self , text ):
        return self.pre_processor(text , return_tensors="""pt""" , truncation=True )

    def forward( self , inputs ):
        return self.model.generate(**inputs )[0]

    def decode( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
| 345 |
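# Usage sketch (added): PipelineTool subclasses are callable; the first call
# lazily loads the checkpoint, so running this downloads the model. The
# dialogue string is an illustrative placeholder.
def _summarizer_demo() -> None:
    tool = TextSummarizationTool()
    print(tool("Alice: Can we meet at 3 pm to review the draft? Bob: Sure, see you in the meeting room." ) )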
from __future__ import annotations
def solve_maze( maze: list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved


def run_maze( maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 8 | 0 |
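# Worked example (added): a 4x4 maze where 0 is open and 1 is blocked; the
# search moves down/right/up/left from (0, 0) to (3, 3). solve_maze prints the
# path matrix and returns True when a path exists.
def _maze_demo() -> None:
    maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    assert solve_maze(maze ) is True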
"""simple docstring"""
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id( spanish_id: str ) -> bool:
    if not isinstance(spanish_id , str ):
        msg = f'''Expected string as input, found {type(spanish_id ).__name__}'''
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('-' , '' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(ERROR_MSG )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG ) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG )
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 |
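# Worked example (added): the control letter is LOOKUP_LETTERS[number % 23].
# For 12345678, 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so
# "12345678Z" is valid while "12345678A" is not.
def _spanish_id_demo() -> None:
    assert is_spain_national_id('12345678Z' )
    assert not is_spain_national_id('12345678A' )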
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner:
    '''simple docstring'''
    def __init__(self, k: float, window_size: int ):
        '''simple docstring'''
        # k : Harris free parameter, normally chosen in [0.04, 0.06].
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value' )

    def __str__(self ):
        '''simple docstring'''
        return str(self.k )

    def detect(self, img_path: str ):
        '''simple docstring'''
        img = cva.imread(img_path, 0 )
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset ):
            for x in range(offset, w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0), 0 )
                    color_img.itemset((y, x, 1), 0 )
                    color_img.itemset((y, x, 2), 2_5_5 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
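# Note (added): the response computed above is the standard Harris measure
# R = det(M) - k * trace(M)**2, where M is the windowed structure tensor
# [[Ixx, Ixy], [Ixy, Iyy]]; pixels whose R exceeds the threshold are corners.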
| 316 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset (path , tmp_path ):
    """simple docstring"""
    inspect_dataset(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric (path , tmp_path ):
    """simple docstring"""
    inspect_metric(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info (path , config_name , expected_splits ):
    """simple docstring"""
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error (path , config_name , expected_exception ):
    """simple docstring"""
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def A (__A : Any , __A : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def A (__A : List[str] , __A : List[str] , __A : int ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def A (__A : Optional[Any] , __A : List[Any] , __A : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_infos(__A )
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def A (__A : str , __A : List[str] , __A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
| 51 |
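The tests above exercise the public inspection helpers of the `datasets` library directly; a minimal usage sketch, assuming network access to the Hugging Face Hub and reusing the dataset names from the parametrized cases:

from datasets import get_dataset_config_names, get_dataset_split_names

# List the configurations a dataset exposes...
print(get_dataset_config_names("squad"))  # expected to include "plain_text"

# ...and the splits of one configuration.
print(get_dataset_split_names("squad", config_name="plain_text"))  # ["train", "validation"]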
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case_ : List[Any] = data_utils.TransfoXLTokenizer
snake_case_ : int = data_utils.TransfoXLCorpus
snake_case_ : List[Any] = data_utils
snake_case_ : int = data_utils
def A (__A : Dict , __A : List[Any] , __A : Union[str, Any] , __A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__A , '''rb''' ) as fp:
UpperCAmelCase_ = pickle.load(__A , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
UpperCAmelCase_ = corpus.vocab.__dict__
torch.save(__A , __A )
UpperCAmelCase_ = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , __A )
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(__A , __A )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCAmelCase_ = os.path.abspath(__A )
UpperCAmelCase_ = os.path.abspath(__A )
print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCAmelCase_ = TransfoXLConfig()
else:
UpperCAmelCase_ = TransfoXLConfig.from_json_file(__A )
print(F"""Building PyTorch model from configuration: {config}""" )
UpperCAmelCase_ = TransfoXLLMHeadModel(__A )
UpperCAmelCase_ = load_tf_weights_in_transfo_xl(__A , __A , __A )
# Save pytorch-model
UpperCAmelCase_ = os.path.join(__A , __A )
UpperCAmelCase_ = os.path.join(__A , __A )
print(F"""Save PyTorch model to {os.path.abspath(__A )}""" )
torch.save(model.state_dict() , __A )
print(F"""Save configuration file to {os.path.abspath(__A )}""" )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
snake_case_ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 51 | 1 |
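The block of masked module-level assignments near the top of the script above implements the Stack Overflow trick its comment cites: before unpickling, alias the class and module lookup paths that existed when the pickle was written so `pickle.load` can resolve them. A generic sketch of the pattern follows; the module and file names here are illustrative, not the script's exact aliases.

import pickle
import sys

from new_location import data_utils  # hypothetical current home of the classes

# Pickles store classes by "module.ClassName", so re-expose the old lookup
# path before loading anything written under the old layout.
sys.modules["data_utils"] = data_utils

with open("corpus.pkl", "rb") as fp:  # hypothetical Python 2-era pickle
    corpus = pickle.load(fp, encoding="latin1")  # latin1 decodes Python 2 bytes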
"""simple docstring"""
import heapq
import sys
import numpy as np
_lowerCAmelCase : Any = tuple[int, int]
class UpperCAmelCase_ :
def __init__( self : str ):
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : str = set()
def snake_case_ ( self : Optional[int] ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def snake_case_ ( self : List[Any] ):
return len(self.elements ) == 0
def snake_case_ ( self : Dict , A : Any , A : str ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(A )
else:
# update
# print("update", item)
_UpperCAmelCase : Optional[int] = []
((_UpperCAmelCase) , (_UpperCAmelCase)) : Any = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : str = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case_ ( self : List[Any] , A : str ):
if item in self.set:
self.set.remove(A )
_UpperCAmelCase : int = []
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case_ ( self : int ):
return self.elements[0][1]
def snake_case_ ( self : Any ):
((_UpperCAmelCase) , (_UpperCAmelCase)) : Any = heapq.heappop(self.elements )
self.set.remove(A )
return (priority, item)
def __snake_case ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : TPos ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = np.array(SCREAMING_SNAKE_CASE__ )
return np.linalg.norm(a - b )
def __snake_case ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : TPos ) -> Optional[Any]:
'''simple docstring'''
return consistent_heuristic(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) // t
def __snake_case ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : TPos ) -> Dict:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __snake_case ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : dict[TPos, float] ) -> int:
'''simple docstring'''
_UpperCAmelCase : Tuple = g_function[start] + Wa * heuristics[i](SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return ans
def __snake_case ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = np.chararray((n, n) )
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase : Optional[int] = "*"
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if (j, (n - 1) - i) in blocks:
_UpperCAmelCase : Any = "#"
_UpperCAmelCase : Optional[Any] = "-"
_UpperCAmelCase : List[Any] = back_pointer[goal]
while x != start:
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[Any] = x
# print(x)
_UpperCAmelCase : Optional[Any] = "-"
_UpperCAmelCase : int = back_pointer[x]
_UpperCAmelCase : Union[str, Any] = "-"
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
_UpperCAmelCase : int = back_pointer[goal]
while x != start:
print(SCREAMING_SNAKE_CASE__ , end=" " )
_UpperCAmelCase : Optional[Any] = back_pointer[x]
print(SCREAMING_SNAKE_CASE__ )
sys.exit()
def __snake_case ( SCREAMING_SNAKE_CASE__ : TPos ) -> List[Any]:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]:
'''simple docstring'''
for itera in range(SCREAMING_SNAKE_CASE__ ):
open_list[itera].remove_element(SCREAMING_SNAKE_CASE__ )
# print("s", s)
# print("j", j)
((_UpperCAmelCase) , (_UpperCAmelCase)) : int = s
_UpperCAmelCase : List[Any] = (x - 1, y)
_UpperCAmelCase : Any = (x + 1, y)
_UpperCAmelCase : Tuple = (x, y + 1)
_UpperCAmelCase : int = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(SCREAMING_SNAKE_CASE__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = -1
_UpperCAmelCase : int = float("inf" )
if valid(SCREAMING_SNAKE_CASE__ ) and g_function[neighbours] > g_function[s] + 1:
_UpperCAmelCase : Union[str, Any] = g_function[s] + 1
_UpperCAmelCase : Union[str, Any] = s
if neighbours not in close_list_anchor:
open_list[0].put(SCREAMING_SNAKE_CASE__ , key(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if neighbours not in close_list_inad:
for var in range(1 , SCREAMING_SNAKE_CASE__ ):
if key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) <= Wa * key(
SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
open_list[j].put(
SCREAMING_SNAKE_CASE__ , key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __snake_case ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
_lowerCAmelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
_lowerCAmelCase : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
_lowerCAmelCase : Optional[int] = make_common_ground()
_lowerCAmelCase : Union[str, Any] = blocks_blk
# hyper parameters
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Any = 1
_lowerCAmelCase : int = 20
_lowerCAmelCase : int = 3 # one consistent and two other inconsistent
# start and end destination
_lowerCAmelCase : Union[str, Any] = (0, 0)
_lowerCAmelCase : Tuple = (n - 1, n - 1)
_lowerCAmelCase : Union[str, Any] = 1
def __snake_case ( SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : TPos , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Any = {start: 0, goal: float("inf" )}
_UpperCAmelCase : Dict = {start: -1, goal: -1}
_UpperCAmelCase : Any = []
_UpperCAmelCase : Tuple = set()
for i in range(SCREAMING_SNAKE_CASE__ ):
open_list.append(PriorityQueue() )
open_list[i].put(SCREAMING_SNAKE_CASE__ , key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
_UpperCAmelCase : list[int] = []
_UpperCAmelCase : list[int] = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
_UpperCAmelCase , _UpperCAmelCase : int = open_list[i].top_show()
visited.add(SCREAMING_SNAKE_CASE__ )
expand_state(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
close_list_inad.append(SCREAMING_SNAKE_CASE__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
_UpperCAmelCase : Optional[int] = open_list[0].top_show()
visited.add(SCREAMING_SNAKE_CASE__ )
expand_state(
SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
close_list_anchor.append(SCREAMING_SNAKE_CASE__ )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 202 |
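The search above is Multi-Heuristic A* (MHA*): an anchor queue ordered by a consistent heuristic plus extra queues ordered by inadmissible heuristics, which are expanded only while their best key stays within a bounded factor of the anchor's. Below is a compact sketch of the priority rule the masked `key` function computes; in the standard formulation there are two weights, w1 inside the key and w2 gating the inadmissible queues, which the masked code above collapses into the single name `Wa`.

def mha_key(g: dict, s, goal, heuristic, w1: float = 1.0) -> float:
    # Priority of state s in one MHA* queue: inflated f-value g(s) + w1 * h(s, goal).
    return g[s] + w1 * heuristic(s, goal)

# An inadmissible queue i is only popped while its minimum key is within a
# factor w2 of the anchor queue's minimum key:
#     min_key(queue_i) <= w2 * min_key(queue_0)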
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_lowerCAmelCase : List[str] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_UpperCAmelCase : str = k.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return k
def __snake_case ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
_UpperCAmelCase : List[Any] = DEFAULTS.copy()
cfg_kwargs.update(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusConfig(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = torch_model.model.state_dict()
_UpperCAmelCase : Union[str, Any] = {}
for k, v in tf_weights.items():
_UpperCAmelCase : Union[str, Any] = rename_state_dict_key(SCREAMING_SNAKE_CASE__ )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCAmelCase : Any = v.T
_UpperCAmelCase : str = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCAmelCase : Tuple = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
_UpperCAmelCase : Any = mapping["shared.weight"]
_UpperCAmelCase : Dict = mapping["shared.weight"]
_UpperCAmelCase : Dict = {k: torch.zeros_like(SCREAMING_SNAKE_CASE__ ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = torch_model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Optional[Any] = ["Adafactor", "global_step"]
for name, shape in tqdm(SCREAMING_SNAKE_CASE__ , desc="converting tf checkpoint to dict" ):
_UpperCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase : int = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = array
return tf_weights
def __snake_case ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = Path(SCREAMING_SNAKE_CASE__ ).parent.name
_UpperCAmelCase : Tuple = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
_UpperCAmelCase : Dict = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=SCREAMING_SNAKE_CASE__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(SCREAMING_SNAKE_CASE__ )
# convert model
_UpperCAmelCase : Union[str, Any] = get_tf_weights_as_numpy(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCAmelCase : Optional[int] = task_specific_params
_UpperCAmelCase : str = convert_pegasus(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
torch_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(SCREAMING_SNAKE_CASE__ , Path(SCREAMING_SNAKE_CASE__ ) / "pytorch_model.bin" )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
if args.save_dir is None:
_lowerCAmelCase : Tuple = Path(args.tf_ckpt_path).parent.name
_lowerCAmelCase : Dict = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 202 | 1 |
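The `PATTERNS` table above drives a plain sequential substitution that maps TensorFlow variable names onto the PyTorch state-dict layout; each rewrite sees the output of the previous one. A standalone sketch using a small illustrative subset of the table:

PATTERNS = [
    ["attention", "attn"],
    ["/", "."],
    ["kernel", "weight"],
]  # illustrative subset of the full table above

def rename_key(key: str) -> str:
    # Apply the replacements in order; later patterns see earlier rewrites.
    for old, new in PATTERNS:
        key = key.replace(old, new)
    return key

print(rename_key("encoder/layer_0/attention/kernel"))  # encoder.layer_0.attn.weight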
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = 0
@slow
def _snake_case ( self ) -> int:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _snake_case ( self ) -> str:
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _snake_case ( self ) -> int:
lowerCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , config=lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _snake_case ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase_ , """vocab.txt""" ) )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""bert""" , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase_ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase_ , """merges.txt""" ) )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""gpt2""" , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@require_tokenizers
def _snake_case ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase_ , """vocab.txt""" ) )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase_ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase_ , """merges.txt""" ) )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> List[Any]:
with pytest.raises(lowercase_ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _snake_case ( self ) -> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase_ , lowercase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _snake_case ( self ) -> Any:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase_ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
lowerCAmelCase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _snake_case ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase = TOKENIZER_MAPPING.values()
lowerCAmelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase_ )
@require_tokenizers
def _snake_case ( self ) -> Dict:
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ ) , lowercase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase_ )
@require_tokenizers
def _snake_case ( self ) -> str:
lowerCAmelCase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase_ )
lowerCAmelCase = """Hello, world. How are you?"""
lowerCAmelCase = tokenizer.tokenize(lowercase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
lowerCAmelCase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase_ )
lowerCAmelCase = tokenizer.tokenize(lowercase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _snake_case ( self ) -> str:
lowerCAmelCase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase_ ) , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
lowerCAmelCase = get_tokenizer_config("""bert-base-cased""" )
lowerCAmelCase = config.pop("""_commit_hash""" , lowercase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase_ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase = get_tokenizer_config(lowercase_ )
self.assertDictEqual(lowercase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowerCAmelCase = get_tokenizer_config(lowercase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _snake_case ( self ) -> Tuple:
try:
AutoConfig.register("""custom""" , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
lowerCAmelCase = CustomTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _snake_case ( self ) -> List[Any]:
try:
AutoConfig.register("""custom""" , lowercase_ )
# Can register in two steps
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase_ , slow_tokenizer_class=lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
# We pass through a fast BERT tokenizer because there is no slow-to-fast converter for our new tokenizer,
# and that model does not have a tokenizer.json file
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = BertTokenizerFast.from_pretrained(lowercase_ )
bert_tokenizer.save_pretrained(lowercase_ )
lowerCAmelCase = CustomTokenizerFast.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
lowerCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ )
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowerCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _snake_case ( self ) -> Optional[int]:
class lowercase ( SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = False
class lowercase ( SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = NewTokenizer
_SCREAMING_SNAKE_CASE = False
try:
AutoConfig.register("""custom""" , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is enabled, we load from the Hub
lowerCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowerCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _snake_case ( self ) -> List[Any]:
with self.assertRaisesRegex(
lowercase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base""" )
def _snake_case ( self ) -> Optional[int]:
with self.assertRaisesRegex(
lowercase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , revision="""aaaaaa""" )
def _snake_case ( self ) -> Tuple:
# Make sure we have cached the tokenizer.
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 46 |
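Several of the tests above revolve around wiring a custom tokenizer into `AutoTokenizer` resolution. A minimal sketch of that registration flow, assuming `transformers` is installed; the `MyConfig`/`MyTokenizer` classes and the "my-model" model type are illustrative stand-ins for the test fixtures.

from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # illustrative model type

class MyTokenizer(BertTokenizer):
    pass  # illustrative slow tokenizer

# Map the model type to the config class, then the config class to the
# tokenizer class, exactly as the registration tests above do.
AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)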
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case_ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 | 0 |
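The module above follows the lazy-import pattern: `_import_structure` maps submodules to exported names, the real imports run only under `TYPE_CHECKING`, and at runtime the package swaps itself for a `_LazyModule` that imports a submodule the first time one of its attributes is touched. A simplified sketch of the idea; the real `_LazyModule` additionally handles caching, `dir()`, pickling and more.

import importlib
import types

class LazyModule(types.ModuleType):
    # Swap-in module object that imports a submodule only on first attribute access.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(submodule, attr)

# The real pattern then replaces the package in sys.modules:
#     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)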
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase__ ( A__ , A__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Tuple , *, __lowerCamelCase : int = 4 , __lowerCamelCase : int = 768 , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , ) -> Tuple:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.zeros(__lowerCamelCase ) )
# parameters for additional clip time embeddings
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase )
# parameters for encoder hidden states
SCREAMING_SNAKE_CASE__ = clip_extra_context_tokens
SCREAMING_SNAKE_CASE__ = nn.Linear(
__lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim )
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.LayerNorm(__lowerCamelCase )
def lowercase_ ( self : str , *, __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
SCREAMING_SNAKE_CASE__ = image_embeddings.shape[0]
SCREAMING_SNAKE_CASE__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = classifier_free_guidance_embeddings.expand(
__lowerCamelCase , -1 )
SCREAMING_SNAKE_CASE__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
SCREAMING_SNAKE_CASE__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
SCREAMING_SNAKE_CASE__ = self.embedding_proj(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.clip_image_embeddings_project_to_time_embeddings(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
SCREAMING_SNAKE_CASE__ = self.clip_extra_context_tokens_proj(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = clip_extra_context_tokens.reshape(__lowerCamelCase , -1 , self.clip_extra_context_tokens )
SCREAMING_SNAKE_CASE__ = clip_extra_context_tokens.permute(0 , 2 , 1 )
SCREAMING_SNAKE_CASE__ = self.encoder_hidden_states_proj(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.text_encoder_hidden_states_norm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 218 |
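The `do_classifier_free_guidance` branch above prepends a learned "null" image embedding so the unconditional and conditional branches share one forward pass. A minimal torch sketch of that batching trick; the sizes and the zero placeholder (a learned `nn.Parameter` in the model above) are illustrative.

import torch

batch, dim = 2, 768                    # illustrative sizes
image_embeddings = torch.randn(batch, dim)
null_embedding = torch.zeros(1, dim)   # stands in for the learned parameter

# Broadcast the null embedding across the batch and stack it in front so a
# single forward pass yields both guidance branches.
uncond = null_embedding.expand(batch, -1)
stacked = torch.cat([uncond, image_embeddings], dim=0)
print(stacked.shape)  # torch.Size([4, 768])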
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if not isinstance(_A , _A ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = str(_A )
while len(_A ) != 1:
SCREAMING_SNAKE_CASE__ = [int(_A ) for i in num_string]
SCREAMING_SNAKE_CASE__ = 1
for i in range(0 , len(_A ) ):
total *= numbers[i]
SCREAMING_SNAKE_CASE__ = str(_A )
steps += 1
return steps
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if not isinstance(_A , _A ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = str(_A )
while len(_A ) != 1:
SCREAMING_SNAKE_CASE__ = [int(_A ) for i in num_string]
SCREAMING_SNAKE_CASE__ = 0
for i in range(0 , len(_A ) ):
total += numbers[i]
SCREAMING_SNAKE_CASE__ = str(_A )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218 | 1 |
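A quick worked trace of the two functions above, called by their original names, which survive in the error messages:

# multiplicative persistence of 39:
#   3 * 9 = 27  ->  2 * 7 = 14  ->  1 * 4 = 4   (3 steps)
assert multiplicative_persistence(39) == 3

# additive persistence of 199:
#   1 + 9 + 9 = 19  ->  1 + 9 = 10  ->  1 + 0 = 1   (3 steps)
assert additive_persistence(199) == 3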
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCamelCase__ = TypeVar("""T""")
UpperCamelCase__ = Union[List[T], Tuple[T, ...]]
UpperCamelCase__ = Union[T, List[T], Dict[str, T]]
UpperCamelCase__ = Union[str, bytes, os.PathLike]
| 92 |
'''simple docstring'''
def UpperCamelCase ( _lowerCamelCase : int | float | str ):
try:
A__ = float(_lowerCamelCase )
except ValueError:
raise ValueError("Please enter a valid number" )
A__ = decimal - int(_lowerCamelCase )
if fractional_part == 0:
return int(_lowerCamelCase ), 1
else:
A__ = len(str(_lowerCamelCase ).split("." )[1] )
A__ = int(decimal * (10**number_of_frac_digits) )
A__ = 10**number_of_frac_digits
A__, A__ = denominator, numerator
while True:
A__ = dividend % divisor
if remainder == 0:
break
A__, A__ = divisor, remainder
A__, A__ = numerator / divisor, denominator / divisor
return int(_lowerCamelCase ), int(_lowerCamelCase )
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction("67") = }""")
print(f"""{decimal_to_fraction("45.0") = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction("6.25") = }""")
print(f"""{decimal_to_fraction("78td") = }""")
| 237 | 0 |
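The hand-rolled Euclidean loop above computes gcd(numerator, denominator) to reduce the fraction; the standard library exposes the same reduction directly. An equivalent sketch using `math.gcd` (illustrative, not a replacement for the function above, and subject to the same float-representation caveats):

from math import gcd

def to_fraction(decimal: float) -> tuple:
    digits = len(str(decimal).split(".")[1])  # count of fractional digits
    numerator = int(decimal * 10**digits)
    denominator = 10**digits
    g = gcd(numerator, denominator)
    return numerator // g, denominator // g

print(to_fraction(6.25))  # (25, 4)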
def _snake_case ( _snake_case : int ) -> Tuple:
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
_A = str(lowerCamelCase_ )
_A = """""".join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _snake_case ( _snake_case : float = 99 ) -> Dict:
'''simple docstring'''
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
_A = 0
_A = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
| 358 |
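For intuition about `check_bouncy` above: a number is bouncy when its digits are neither non-decreasing nor non-increasing, so nothing below 101 qualifies. A short sanity check, calling the function by its original name:

assert not check_bouncy(100)  # digits 1, 0, 0 are non-increasing
assert check_bouncy(101)      # 1 -> 0 falls, then 0 -> 1 rises
assert check_bouncy(155349)   # neither sorted order matches the digits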
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : "DiagonalGaussianDistribution"
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : List[Any] = True
@register_to_config
def __init__( self : List[str] , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 3 , _UpperCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , _UpperCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , _UpperCAmelCase : Tuple[int] = (64,) , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = "silu" , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 32 , _UpperCAmelCase : int = 32 , _UpperCAmelCase : float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
_A = Encoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , down_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , double_z=_UpperCAmelCase , )
# pass init params to Decoder
_A = Decoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , up_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , act_fn=_UpperCAmelCase , )
_A = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_A = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1 )
_A = False
_A = False
# only relevant if vae tiling is enabled
_A = self.config.sample_size
_A = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_A = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_A = 0.25
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple=False ):
if isinstance(_UpperCAmelCase , (Encoder, Decoder) ):
_A = value
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : bool = True ):
_A = use_tiling
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.enable_tiling(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_A = True
def lowerCAmelCase_ ( self : str ):
_A = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase_ ( self : str ):
_A = {}
def fn_recursive_add_processors(_UpperCAmelCase : str , _UpperCAmelCase : torch.nn.Module , _UpperCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(_UpperCAmelCase , 'set_processor' ):
_A = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return processors
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
_A = len(self.attn_processors.keys() )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_UpperCAmelCase )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_UpperCAmelCase : str , _UpperCAmelCase : torch.nn.Module , _UpperCAmelCase : int ):
if hasattr(_UpperCAmelCase , 'set_processor' ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
module.set_processor(_UpperCAmelCase )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCAmelCase , return_dict=_UpperCAmelCase )
if self.use_slicing and x.shape[0] > 1:
_A = [self.encoder(_UpperCAmelCase ) for x_slice in x.split(1 )]
_A = torch.cat(_UpperCAmelCase )
else:
_A = self.encoder(_UpperCAmelCase )
_A = self.quant_conv(_UpperCAmelCase )
_A = DiagonalGaussianDistribution(_UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCAmelCase , return_dict=_UpperCAmelCase )
_A = self.post_quant_conv(_UpperCAmelCase )
_A = self.decoder(_UpperCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
@apply_forward_hook
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_slicing and z.shape[0] > 1:
_A = [self._decode(_UpperCAmelCase ).sample for z_slice in z.split(1 )]
_A = torch.cat(_UpperCAmelCase )
else:
_A = self._decode(_UpperCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ):
_A = min(a.shape[2] , b.shape[2] , _UpperCAmelCase )
for y in range(_UpperCAmelCase ):
_A = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ):
_A = min(a.shape[3] , b.shape[3] , _UpperCAmelCase )
for x in range(_UpperCAmelCase ):
_A = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
_A = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_A = int(self.tile_latent_min_size * self.tile_overlap_factor )
_A = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_A = []
for i in range(0 , x.shape[2] , _UpperCAmelCase ):
_A = []
for j in range(0 , x.shape[3] , _UpperCAmelCase ):
_A = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_A = self.encoder(_UpperCAmelCase )
_A = self.quant_conv(_UpperCAmelCase )
row.append(_UpperCAmelCase )
rows.append(_UpperCAmelCase )
_A = []
for i, row in enumerate(_UpperCAmelCase ):
_A = []
for j, tile in enumerate(_UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase )
if j > 0:
_A = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) )
_A = torch.cat(_UpperCAmelCase , dim=2 )
_A = DiagonalGaussianDistribution(_UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
_A = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_A = int(self.tile_sample_min_size * self.tile_overlap_factor )
_A = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_A = []
for i in range(0 , z.shape[2] , _UpperCAmelCase ):
_A = []
for j in range(0 , z.shape[3] , _UpperCAmelCase ):
_A = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_A = self.post_quant_conv(_UpperCAmelCase )
_A = self.decoder(_UpperCAmelCase )
row.append(_UpperCAmelCase )
rows.append(_UpperCAmelCase )
_A = []
for i, row in enumerate(_UpperCAmelCase ):
_A = []
for j, tile in enumerate(_UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase )
if j > 0:
_A = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) )
_A = torch.cat(_UpperCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[torch.Generator] = None , ):
_A = sample
_A = self.encode(_UpperCAmelCase ).latent_dist
if sample_posterior:
_A = posterior.sample(generator=_UpperCAmelCase )
else:
_A = posterior.mode()
_A = self.decode(_UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
| 271 | 0 |
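The `blend_v` and `blend_h` helpers above cross-fade adjacent tiles so tiled encode/decode shows no seams: across `blend_extent` rows (or columns) the result linearly interpolates from tile `a` into tile `b`. A minimal 2-D NumPy sketch of the vertical case, with illustrative names and shapes:

import numpy as np

def blend_vertical(a: np.ndarray, b: np.ndarray, blend_extent: int) -> np.ndarray:
    # Fade the bottom rows of `a` into the top rows of `b` (2-D analogue of
    # the 4-D torch version above).
    blend_extent = min(a.shape[0], b.shape[0], blend_extent)
    out = b.copy()
    for y in range(blend_extent):
        w = y / blend_extent  # 0 at the start of the seam, approaching 1 at its end
        out[y] = a[-blend_extent + y] * (1 - w) + b[y] * w
    return out

top, bottom = np.zeros((8, 8)), np.ones((8, 8))
print(blend_vertical(top, bottom, 4)[:4, 0])  # [0.   0.25 0.5  0.75]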
'''simple docstring'''
from collections.abc import Sequence
def __UpperCamelCase ( lowercase__ : Sequence[float], lowercase__ : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(lowercase__ ) )
def __UpperCamelCase ( lowercase__ : Sequence[float], lowercase__ : float ):
'''simple docstring'''
__lowercase =0.0
for coeff in reversed(lowercase__ ):
__lowercase =result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 141 |
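Both functions above evaluate the same polynomial; `horner` just factors it into nested multiply-adds, replacing the per-term exponentiation of `evaluate_poly` with one multiply and one add per coefficient. A quick check under the module's own example inputs, calling the functions by their original names:

poly, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0  # f(x) = 7x^4 + 9.3x^3 + 5x^2
print(evaluate_poly(poly, x))  # 79800.0 up to float rounding
print(horner(poly, x))         # same value via nested multiply-adds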
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : int ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=__lowercase , )
assert hasattr(self , 'env' )
def snake_case ( self : Tuple , __lowercase : List[str] ):
"""simple docstring"""
__lowercase =f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
# distributed data settings
__lowercase ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowercase , instance_count=__lowercase , instance_type=self.instance_type , debugger_hook_config=__lowercase , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowercase , py_version='py36' , )
def snake_case ( self : int , __lowercase : List[str] ):
"""simple docstring"""
TrainingJobAnalytics(__lowercase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def snake_case ( self : Tuple , __lowercase : List[Any] ):
"""simple docstring"""
__lowercase =self.create_estimator(__lowercase )
# run training
estimator.fit()
# result dataframe
__lowercase =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowercase =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__lowercase =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowercase =(
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __lowercase )
| 141 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ ):
stooge(UpperCamelCase_ , 0 , len(UpperCamelCase_ ) - 1 )
return arr
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
__SCREAMING_SNAKE_CASE = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(UpperCamelCase_ , UpperCamelCase_ , (h - t) )
# Recursively sort last 2/3 elements
stooge(UpperCamelCase_ , i + t , (UpperCamelCase_) )
# Recursively sort first 2/3 elements
stooge(UpperCamelCase_ , UpperCamelCase_ , (h - t) )
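# The recurrence T(n) = 3*T(2n/3) + O(1) gives a running time of
# O(n**(log 3 / log 1.5)) ~ O(n**2.71), slower than even bubble sort;
# stooge sort is of purely educational interest.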
if __name__ == "__main__":
__magic_name__ = input("Enter numbers separated by a comma:\n").strip()
__magic_name__ = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 255 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__magic_name__ = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = {}
state_dict.pop("""pixel_mean""" , UpperCamelCase_ )
state_dict.pop("""pixel_std""" , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__SCREAMING_SNAKE_CASE = key.replace(UpperCamelCase_ , UpperCamelCase_ )
if re.match(UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = int(re.match(UpperCamelCase_ , UpperCamelCase_ ).group(2 ) )
if layer_nb == 0:
__SCREAMING_SNAKE_CASE = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
__SCREAMING_SNAKE_CASE = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
__SCREAMING_SNAKE_CASE = key.replace("""layers.2""" , """proj_out""" )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
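# Illustration of the layer renumbering above (hypothetical key, for example only):
#   "mask_decoder.output_hypernetworks_mlps.0.layers.0.weight"
#     -> "mask_decoder.output_hypernetworks_mlps.0.proj_in.weight"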
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="ybelkada/segment-anything" ):
__SCREAMING_SNAKE_CASE = hf_hub_download(UpperCamelCase_ , f"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
__SCREAMING_SNAKE_CASE = SamConfig()
elif "sam_vit_l" in model_name:
__SCREAMING_SNAKE_CASE = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__SCREAMING_SNAKE_CASE = SamConfig(
vision_config=UpperCamelCase_ , )
elif "sam_vit_h" in model_name:
__SCREAMING_SNAKE_CASE = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__SCREAMING_SNAKE_CASE = SamConfig(
vision_config=UpperCamelCase_ , )
__SCREAMING_SNAKE_CASE = torch.load(UpperCamelCase_ , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = replace_keys(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = SamImageProcessor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = SamModel(UpperCamelCase_ )
hf_model.load_state_dict(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = hf_model.to("""cuda""" )
__SCREAMING_SNAKE_CASE = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("""RGB""" )
__SCREAMING_SNAKE_CASE = [[[400, 650]]]
__SCREAMING_SNAKE_CASE = [[1]]
__SCREAMING_SNAKE_CASE = processor(images=np.array(UpperCamelCase_ ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
__SCREAMING_SNAKE_CASE = processor(
images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
__SCREAMING_SNAKE_CASE = ((75, 275, 1725, 850),)
__SCREAMING_SNAKE_CASE = processor(images=np.array(UpperCamelCase_ ) , input_boxes=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
__SCREAMING_SNAKE_CASE = [[[400, 650], [800, 650]]]
__SCREAMING_SNAKE_CASE = [[1, 1]]
__SCREAMING_SNAKE_CASE = processor(
images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
__magic_name__ = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
        help="Name of the original SAM checkpoint to convert.",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo id that hosts the original SAM checkpoints.",
    )
__magic_name__ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
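# Example invocation (script filename and output path are illustrative):
#   python convert_sam_original_to_hf.py --model_name sam_vit_b_01ec64 --pytorch_dump_folder_path ./sam-vit-base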
| 255 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : str =AltDiffusionPipeline
UpperCamelCase_ : List[str] =TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ : Any =TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ : Optional[int] =TEXT_TO_IMAGE_IMAGE_PARAMS
def _A (self ):
torch.manual_seed(0 )
__lowercase= UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
__lowercase= DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
__lowercase= AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__lowercase= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
__lowercase= CLIPTextModel(lowerCAmelCase )
__lowercase= XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__lowercase= 7_7
__lowercase= {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _A (self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith('mps' ):
__lowercase= torch.manual_seed(lowerCAmelCase )
else:
__lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
__lowercase= {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _A (self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _A (self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
torch.manual_seed(0 )
__lowercase= RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
__lowercase= RobertaSeriesModelWithTransformation(lowerCAmelCase )
__lowercase= text_encoder
__lowercase= AltDiffusionPipeline(**lowerCAmelCase )
__lowercase= alt_pipe.to(lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= 'A photo of an astronaut'
__lowercase= alt_pipe(**lowerCAmelCase )
__lowercase= output.images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= np.array(
[0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
__lowercase= PNDMScheduler(skip_prk_steps=lowerCAmelCase )
torch.manual_seed(0 )
__lowercase= RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
__lowercase= RobertaSeriesModelWithTransformation(lowerCAmelCase )
__lowercase= text_encoder
__lowercase= AltDiffusionPipeline(**lowerCAmelCase )
__lowercase= alt_pipe.to(lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= alt_pipe(**lowerCAmelCase )
__lowercase= output.images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= np.array(
[0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
# make sure here that pndm scheduler skips prk
__lowercase= AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=lowerCAmelCase )
__lowercase= alt_pipe.to(lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'A painting of a squirrel eating a burger'
__lowercase= torch.manual_seed(0 )
__lowercase= alt_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='np' )
__lowercase= output.images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A (self ):
__lowercase= DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
__lowercase= AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase )
__lowercase= alt_pipe.to(lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'A painting of a squirrel eating a burger'
__lowercase= torch.manual_seed(0 )
__lowercase= alt_pipe([prompt] , generator=lowerCAmelCase , num_inference_steps=2 , output_type='numpy' )
__lowercase= output.images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 295 |
from __future__ import annotations
def _lowerCamelCase( lowercase__ ) -> list[int]:
    '''Return the prime factors of the argument in non-decreasing order.

    >>> _lowerCamelCase(100)
    [2, 2, 5, 5]
    >>> _lowerCamelCase(97)
    [97]
    '''
__lowercase= 2
__lowercase= []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(lowercase__ )
if n > 1:
factors.append(lowercase__ )
return factors
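# Trial division only needs to test divisors up to sqrt(n); whatever remains
# (n > 1) after the loop must itself be prime, e.g. 100 -> [2, 2, 5, 5].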
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 | 1 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_a = logging.get_logger(__name__)
_a = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_a = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_a = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_a = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_a = OrderedDict(
[
        # Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_a = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_a = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_a = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_a = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_a = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_a = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_a = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_a = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_a = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = FLAX_MODEL_MAPPING
_a = auto_class_update(FlaxAutoModel)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_a = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_a = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_a = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_a = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_a = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_a = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_a = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_a = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_a = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_a = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A_ (_BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_a = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 23 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a = 0
_a = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : List[Any] = pos_y
UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Dict = goal_y
UpperCAmelCase_ : Any = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = self.calculate_heuristic()
UpperCAmelCase_ : Any = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase_ ) + abs(lowercase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowercase_ ):
"""simple docstring"""
return self.f_cost < other.f_cost
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ )
UpperCAmelCase_ : str = [self.start]
UpperCAmelCase_ : list[Node] = []
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase_ )
self.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : str = self.get_successors(lowercase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase_ )
else:
self.open_nodes.append(lowercase_ )
return [self.start.pos]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
for action in delta:
UpperCAmelCase_ : str = parent.pos_x + action[1]
UpperCAmelCase_ : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
return successors
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[int] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
self.fwd_astar.closed_nodes.append(lowercase_ )
self.bwd_astar.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = current_bwd_node
UpperCAmelCase_ : str = current_fwd_node
UpperCAmelCase_ : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase_ )
else:
astar.open_nodes.append(lowercase_ )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ )
UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ )
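        # The meeting node appears in both halves; drop its duplicate from the
        # backward path and reverse that half so it runs meeting point -> goal.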
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
    _a = BidirectionalAStar(init, goal)
    _a = bd_a_star.search()
    _a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 23 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
class __snake_case ( A_ ):
__lowerCamelCase = ["""audio_values""", """audio_mask"""]
def __init__( self , __UpperCamelCase=2048 , __UpperCamelCase=1 , __UpperCamelCase=[16, 16] , __UpperCamelCase=128 , __UpperCamelCase=44100 , __UpperCamelCase=86 , __UpperCamelCase=2048 , __UpperCamelCase=0.0 , **__UpperCamelCase , ) -> int:
'''simple docstring'''
super().__init__(
feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , **snake_case_ , )
snake_case__ : Dict = spectrogram_length
snake_case__ : Tuple = num_channels
snake_case__ : Optional[int] = patch_size
snake_case__ : Dict = feature_size // self.patch_size[1]
snake_case__ : Dict = n_fft
snake_case__ : Optional[Any] = sampling_rate // hop_length_to_sampling_rate
snake_case__ : Dict = sampling_rate
snake_case__ : Tuple = padding_value
snake_case__ : int = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=snake_case_ , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=snake_case_ , norm='slaney' , mel_scale='slaney' , ).T
def __a ( self , __UpperCamelCase ) -> np.ndarray:
'''simple docstring'''
snake_case__ : Optional[int] = spectrogram(
snake_case_ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
snake_case__ : Optional[int] = log_spec[:, :-1]
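        # Shift and rescale the dB spectrogram: after subtracting 20 dB, values in
        # roughly [-60, +20] dB are clipped and mapped onto the [-1.0, 1.0] range.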
snake_case__ : List[Any] = log_spec - 2_0.0
snake_case__ : Tuple = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , **__UpperCamelCase , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
snake_case__ : List[str] = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
snake_case__ : Any = is_batched_numpy or (
isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case__ : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray ):
snake_case__ : Optional[int] = np.asarray(snake_case_ , dtype=np.floataa )
elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case__ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case__ : Dict = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
snake_case__ : Union[str, Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , snake_case_ ):
snake_case__ : Dict = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
snake_case__ : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
snake_case__ : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
snake_case__ : Optional[Any] = np.array(snake_case_ ).astype(np.floataa )
# convert into correct format for padding
snake_case__ : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
snake_case__ : str = np.ones([len(snake_case_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
snake_case__ : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(snake_case_ ) ):
snake_case__ : str = audio_features[i]
snake_case__ : Optional[Any] = feature
# return as BatchFeature
if return_attention_mask:
snake_case__ : Union[str, Any] = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
snake_case__ : Any = {'audio_values': padded_audio_features}
snake_case__ : Dict = BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
return encoded_inputs
| 143 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( A_ ):
lowercase__ = ['''image_processor''', '''feature_extractor''']
lowercase__ = '''TvltImageProcessor'''
lowercase__ = '''TvltFeatureExtractor'''
def __init__( self : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] ) -> Dict:
'''simple docstring'''
super().__init__(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = image_processor
A__ = feature_extractor
def __call__( self : List[Any] , snake_case_ : List[str]=None , snake_case_ : Dict=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : Dict=False , snake_case_ : Union[str, Any]=False , *snake_case_ : List[str] , **snake_case_ : List[Any] , ) -> List[str]:
'''simple docstring'''
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process." )
A__ = None
if images is not None:
A__ = self.image_processor(snake_case_ , mask_pixel=snake_case_ , *snake_case_ , **snake_case_ )
if images_mixed is not None:
A__ = self.image_processor(snake_case_ , is_mixed=snake_case_ , *snake_case_ , **snake_case_ )
if audio is not None:
A__ = self.feature_extractor(
snake_case_ , *snake_case_ , sampling_rate=snake_case_ , mask_audio=snake_case_ , **snake_case_ )
A__ = {}
if audio is not None:
output_dict.update(snake_case_ )
if images is not None:
output_dict.update(snake_case_ )
if images_mixed_dict is not None:
output_dict.update(snake_case_ )
return output_dict
@property
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ = self.image_processor.model_input_names
A__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 247 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , __lowerCAmelCase )
a__ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
a__ = dataset_size < in_memory_max_size
else:
a__ = False
a__ = is_small_dataset(__lowerCAmelCase )
assert result == expected
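# Note: with the library default of IN_MEMORY_MAX_SIZE == 0, is_small_dataset
# always returns False, so datasets stay memory-mapped on disk rather than in RAM.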
| 109 |
from collections import defaultdict
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 , __lowerCAmelCase : int = 1_0 ):
a__ = defaultdict(__lowerCAmelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
a__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
a__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(__lowerCAmelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 1_0 )
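# Counts square laminae (hollow squares): each uses outer**2 - hole**2 tiles,
# and the answer is how many tile counts up to t_limit can be arranged as a
# lamina in between 1 and 10 distinct ways (Project Euler problem 174).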
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class _A ( nn.Module ):
UpperCamelCase__ : int
UpperCamelCase__ : jnp.dtype = jnp.floataa
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Tuple , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a , __a , __a , __a = hidden_states.shape
__a = jax.image.resize(
__SCREAMING_SNAKE_CASE , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
__a = self.conv(__SCREAMING_SNAKE_CASE)
return hidden_states
class _A ( nn.Module ):
UpperCamelCase__ : int
UpperCamelCase__ : jnp.dtype = jnp.floataa
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = self.conv(__SCREAMING_SNAKE_CASE)
return hidden_states
class _A ( nn.Module ):
UpperCamelCase__ : int
UpperCamelCase__ : int = None
UpperCamelCase__ : float = 0.0
UpperCamelCase__ : bool = None
UpperCamelCase__ : jnp.dtype = jnp.floataa
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.in_channels if self.out_channels is None else self.out_channels
__a = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__a = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__a = nn.Dense(__SCREAMING_SNAKE_CASE , dtype=self.dtype)
__a = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__a = nn.Dropout(self.dropout_prob)
__a = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__a = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__a = None
if use_nin_shortcut:
__a = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=True):
'''simple docstring'''
__a = hidden_states
__a = self.norma(__SCREAMING_SNAKE_CASE)
__a = nn.swish(__SCREAMING_SNAKE_CASE)
__a = self.conva(__SCREAMING_SNAKE_CASE)
__a = self.time_emb_proj(nn.swish(__SCREAMING_SNAKE_CASE))
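        # Broadcast the time embedding from (batch, channels) to
        # (batch, 1, 1, channels) so it adds across every spatial position (NHWC).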
__a = jnp.expand_dims(jnp.expand_dims(__SCREAMING_SNAKE_CASE , 1) , 1)
__a = hidden_states + temb
__a = self.norma(__SCREAMING_SNAKE_CASE)
__a = nn.swish(__SCREAMING_SNAKE_CASE)
__a = self.dropout(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.conva(__SCREAMING_SNAKE_CASE)
if self.conv_shortcut is not None:
__a = self.conv_shortcut(__SCREAMING_SNAKE_CASE)
return hidden_states + residual
| 49 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __snake_case ( ):
__a , __a = 9, 14 # noqa: F841
__a = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__a = defaultdict(_UpperCAmelCase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__a = mst(_UpperCAmelCase )
__a = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__a = tuple(answer[:2] )
__a = tuple(edge[::-1] )
assert edge in result or reverse in result
| 49 | 1 |
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
SCREAMING_SNAKE_CASE_: Optional[int] =[{'type': 'code', 'content': INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE_: List[Any] ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 106 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ={
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : Optional[Any] = """resnet"""
a__ : Tuple = ["""basic""", """bottleneck"""]
def __init__(self : List[Any] , __a : Any=3 , __a : Dict=64 , __a : Union[str, Any]=[256, 512, 1024, 2048] , __a : str=[3, 4, 6, 3] , __a : Optional[Any]="bottleneck" , __a : Tuple="relu" , __a : int=False , __a : Optional[int]=None , __a : str=None , **__a : Dict , ):
super().__init__(**__a )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = layer_type
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = downsample_in_first_stage
UpperCAmelCase_ = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(__a ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
class __A ( UpperCamelCase__ ):
a__ : int = version.parse("""1.11""" )
@property
def _lowercase (self : Optional[int] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase (self : str ):
return 1E-3
| 106 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
__UpperCAmelCase = 'src/transformers'
# Matches is_xxx_available()
__UpperCAmelCase = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__UpperCAmelCase = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__UpperCAmelCase = re.compile(R'^\s*try:')
# Catches a line with else:
__UpperCAmelCase = re.compile(R'^\s*else:')
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
if _re_test_backend.search(_a ) is None:
return None
UpperCAmelCase_ : int = [b[0] for b in _re_backend.findall(_a )]
backends.sort()
return "_and_".join(_a )
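# Example (illustrative): find_backend('    if not is_torch_available() and not is_tf_available():')
# returns "tf_and_torch"; backend names are sorted before being joined.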
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
with open(_a , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase_ : str = f.readlines()
UpperCAmelCase_ : Optional[int] = 0
while line_index < len(_a ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_a ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase_ : List[str] = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase_ : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_a ):
UpperCAmelCase_ : Tuple = _re_one_line_import_struct.search(_a ).groups()[0]
            UpperCAmelCase_ : Union[str, Any] = re.findall(R'\[([^\]]+)\]' , _a )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
UpperCAmelCase_ : List[str] = _re_import_struct_key_value.search(_a )
if single_line_import_search is not None:
UpperCAmelCase_ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(_a ) > 0]
objects.extend(_a )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase_ : int = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase_ : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
UpperCAmelCase_ : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(_a ) is not None:
objects.append(_re_import_struct_add_one.search(_a ).groups()[0] )
elif _re_import_struct_add_many.search(_a ) is not None:
UpperCAmelCase_ : Any = _re_import_struct_add_many.search(_a ).groups()[0].split(', ' )
UpperCAmelCase_ : Any = [obj[1:-1] for obj in imports if len(_a ) > 0]
objects.extend(_a )
elif _re_between_brackets.search(_a ) is not None:
UpperCAmelCase_ : str = _re_between_brackets.search(_a ).groups()[0].split(', ' )
UpperCAmelCase_ : Tuple = [obj[1:-1] for obj in imports if len(_a ) > 0]
objects.extend(_a )
elif _re_quote_object.search(_a ) is not None:
objects.append(_re_quote_object.search(_a ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase_ : Tuple = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase_ : int = []
while (
line_index < len(_a )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
UpperCAmelCase_ : int = lines[line_index]
UpperCAmelCase_ : Optional[int] = _re_import.search(_a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase_ : Union[str, Any] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_a ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase_ : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
UpperCAmelCase_ : Dict = lines[line_index]
UpperCAmelCase_ : Optional[int] = _re_import.search(_a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase_ : Dict = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowercase__ ( __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
def find_duplicates(__snake_case : Optional[int] ):
return [k for k, v in collections.Counter(_a ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase_ : Any = []
for key in import_dict_objects.keys():
UpperCAmelCase_ : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
UpperCAmelCase_ : List[str] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase_ : List[Any] = '''base imports''' if key == '''none''' else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Dict = []
for root, _, files in os.walk(_a ):
if "__init__.py" in files:
UpperCAmelCase_ : Dict = os.path.join(_a , '__init__.py' )
UpperCAmelCase_ : Tuple = parse_init(_a )
if objects is not None:
UpperCAmelCase_ : Union[str, Any] = analyze_results(*_a )
if len(_a ) > 0:
UpperCAmelCase_ : List[Any] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append('\n'.join(_a ) )
if len(_a ) > 0:
raise ValueError('\n\n'.join(_a ) )
def get_transformers_submodules():
    """Collect the names of all importable submodules of transformers."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that every submodule is registered in the main init of Transformers."""
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 29 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowerCamelCase = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCamelCase = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _read32(bytestream):
    """Read a big-endian uint32 from a byte stream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
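# Quick self-contained check of the big-endian reader (an illustrative
# addition): the MNIST image-file magic number 2051 is 0x00000803.
import io

assert _read32(io.BytesIO(b"\x00\x00\x08\x03")) == 2051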
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
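# Illustrative check of the one-hot conversion (an addition): each row should
# have a single 1 at the label's index.
_example = _dense_to_one_hot(numpy.array([1, 0, 3]), 4)
assert (_example.argmax(axis=1) == numpy.array([1, 0, 3])).all()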
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict=False , _SCREAMING_SNAKE_CASE : Optional[int]=True )-> List[str]:
if fake_data:
lowerCAmelCase__ : Dict = [1] * 784
lowerCAmelCase__ : Union[str, Any] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_SCREAMING_SNAKE_CASE )],
[fake_label for _ in range(_SCREAMING_SNAKE_CASE )],
)
lowerCAmelCase__ : str = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCAmelCase__ : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = self.images[perma]
lowerCAmelCase__ : Tuple = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCAmelCase__ : Any = self._num_examples - start
lowerCAmelCase__ : List[str] = self._images[start : self._num_examples]
lowerCAmelCase__ : Tuple = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCAmelCase__ : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = self.images[perm]
lowerCAmelCase__ : List[Any] = self.labels[perm]
# Start next epoch
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : Union[str, Any] = batch_size - rest_num_examples
lowerCAmelCase__ : Any = self._index_in_epoch
lowerCAmelCase__ : Optional[Any] = self._images[start:end]
lowerCAmelCase__ : Optional[Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCAmelCase__ : Dict = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` unless present."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5_000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    """Load the MNIST train/validation/test splits, downloading files if needed."""
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}.")
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
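# Typical usage (not executed here; the first call downloads the MNIST files
# from the mirror configured above):
#
# mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
# batch_images, batch_labels = mnist.train.next_batch(100)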
| 131 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 363 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 227 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """2-color the graph with DFS; it is bipartite iff no edge joins two same-colored vertices."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
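# An odd cycle is the classic non-bipartite case; a triangle should fail the
# check (an illustrative extra demo, not part of the original script):
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False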
| 248 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
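# Pigeonhole sort runs in O(n + range) time with O(range) extra space, so it
# only pays off when the spread of values is comparable to n. A quick
# illustrative self-check (an addition, not part of the original script):
if __name__ == "__main__":
    import random

    sample = [random.randint(0, 20) for _ in range(50)]
    expected = sorted(sample)
    pigeonhole_sort(sample)
    assert sample == expected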
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
    main()
| 248 | 1 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """Streamer that prints generated text to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receive tokens, decode them, and print words to stdout once they are complete."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        """Flush any remaining cache and print a newline to stdout."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        """Print the new text to stdout; end with a newline if the stream is ending."""
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        """Check whether cp is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False
class TextIteratorStreamer(TextStreamer):
    """Streamer that stores finalized text in a queue, to be consumed as an iterator."""

    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
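# Typical usage (a sketch under assumptions about the checkpoint; not executed
# here):
#
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# tok = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# streamer = TextStreamer(tok, skip_prompt=True)
# model.generate(**tok("Hello", return_tensors="pt"), streamer=streamer, max_new_tokens=20)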
| 361 | """simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under an exclusive flock so multi-process output doesn't interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
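# Optional extra check (an illustrative addition, not in the original script):
# all-reducing ones(1) across ranks should yield exactly the world size.
summed = torch.ones(1).to(device)
dist.all_reduce(summed, op=dist.ReduceOp.SUM)
assert summed.item() == world_size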
| 321 | 0 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if the mask covers all persons, every one of them has a task: one valid way
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
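    # Cross-check by brute force (an illustrative addition): persons 0, 1, 2
    # can take tasks {1, 3, 4}, {1, 2, 5}, {3, 4}; enumerating assignments of
    # pairwise-distinct tasks also gives 10.
    from itertools import product

    brute = sum(
        1 for choice in product(*task_performed) if len(set(choice)) == len(choice)
    )
    print(brute)  # 10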
| 348 | '''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """For each index, the length of the longest substring starting there that is also a prefix."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match at index i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
    import doctest

    doctest.testmod()
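    # Illustrative checks (an aside, not part of the original module): for
    # "aaaa" the Z-array is [0, 3, 2, 1], and "abr" occurs twice in
    # "abracadabra".
    assert z_function("aaaa") == [0, 3, 2, 1]
    assert find_pattern("abr", "abracadabra") == 2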
| 272 | 0 |
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`.")
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}.")
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
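# How this formatter is typically reached in practice (a sketch; illustrative
# only):
#
# from datasets import Dataset
#
# ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
# ds[0]["x"]  # -> a jnp array placed on the configured (or default) device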
| 353 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=5_0358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
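# Minimal usage (an illustrative addition): instantiate with defaults and
# override a single field.
if __name__ == "__main__":
    config = BertGenerationConfig(num_hidden_layers=12)
    print(config.model_type, config.num_hidden_layers)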
| 281 | 0 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed from temperature (K) and molar mass (kg/mol)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 194 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy (log-loss)."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    """Fit weights by batch gradient descent on the log-loss."""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
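# The update in logistic_reg is plain batch gradient descent: with
# h = sigmoid(x @ theta), the gradient of the log-loss is x.T @ (h - y) / m,
# which is exactly the `gradient` computed above. A tiny sanity check on
# separable 1-D data (an illustrative addition):
if __name__ == "__main__":
    _x = np.array([[0.0], [1.0], [2.0], [3.0]])
    _y = np.array([0, 0, 1, 1])
    _theta = logistic_reg(0.1, _x, _y, max_iterations=100)
    assert sigmoid_function(_x @ _theta)[-1] > 0.5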
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta))  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 194 | 1 |
def apply_table(inp, table):
    """Permute the bits of `inp` according to `table` (1-indexed positions)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circularly rotate the bit-string left by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """S-box lookup: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
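# A tiny illustration of the permutation primitives (an aside, not part of the
# cipher flow): applying table [2, 1] swaps two characters, and left_shift
# rotates by one position.
assert apply_table("ab", [2, 1]) == "ba"
assert left_shift("1001") == "0011"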
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
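# The pattern above defers heavy imports until first attribute access. A
# minimal sketch of the idea behind _LazyModule (an assumption for
# illustration, not the transformers implementation):
#
# import importlib, types
#
# class _MiniLazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._class_to_module = {c: m for m, cs in import_structure.items() for c in cs}
#
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#         return getattr(module, attr)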
| 47 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs, ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word

                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
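# Typical round trip (a sketch; requires real vocab.json/merges.txt files such
# as the facebook/blenderbot_small-90M ones referenced above):
#
# tok = BlenderbotSmallTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
# tokens = tok._tokenize("sample text")
# tok.convert_tokens_to_string(tokens)  # -> "sample text"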
| 348 | '''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """For each index, the length of the longest substring starting there that is also a prefix."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match at index i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
    import doctest

    doctest.testmod()
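    # Illustrative check (an aside, not part of the original module):
    # "abacaba" has Z-array [0, 0, 1, 0, 3, 0, 1].
    assert z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1]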
| 272 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 363 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
def _lowerCAmelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> BatchEncoding:
snake_case_ : Dict = kwargs.get("is_split_into_words" , _SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> BatchEncoding:
snake_case_ : Dict = kwargs.get("is_split_into_words" , _SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
snake_case_ : str = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[int]:
snake_case_ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
if len(_SCREAMING_SNAKE_CASE ) > self.model_max_length:
snake_case_ : Dict = input_ids[-self.model_max_length :]
return input_ids
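# Usage sketch for the fast tokenizer above (a minimal example, assuming the
# `transformers` package and the public "gpt2" checkpoint are available):
#
#     from transformers import GPT2TokenizerFast
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     # `is_split_into_words=True` is only accepted because add_prefix_space=True
#     enc = tok(["Hello", "world"], is_split_into_words=True)
#     print(enc["input_ids"])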
"""simple docstring"""
import operator as op

# Constant names below are restored best-effort from accelerate's constants
# module; the dump had collapsed every constant onto a single identifier.
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# The name below is an assumption; the dump preserved only the value.
TORCH_DISTRIBUTED_OPERATION_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
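# Usage sketch: the operator map above is typically consumed by a version
# comparison helper like the one below (an illustrative sketch; assumes the
# `packaging` package is available).
def compare_versions(current: str, operation: str, reference: str) -> bool:
    from packaging import version

    # Map ">", ">=", "==", ... onto the corresponding operator and compare parsed versions.
    return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(reference))


# e.g. compare_versions("2.0.1", ">=", FSDP_PYTORCH_VERSION) is True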
"""
Solve quadratic equations, falling back to complex roots when needed.
"""
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return the two roots of a*x^2 + b*x + c = 0, as reals when possible."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
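# Quick sanity checks mirroring the math above (illustrative):
# for (a=5, b=6, c=1) the discriminant is 36 - 20 = 16, giving real roots -0.2 and -1.0;
# x^2 + 1 = 0 has a negative discriminant, so complex roots come back.
assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)
assert quadratic_roots(a=1, b=0, c=1) == (1j, -1j)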
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] = 200_0000 )-> int:
'''simple docstring'''
UpperCAmelCase__ : Tuple = [0]
UpperCAmelCase__ : Dict = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
UpperCAmelCase__ : Any = 0
# the area corresponding to the grid that gives the product closest to target
UpperCAmelCase__ : List[Any] = 0
# an estimate of b, using the quadratic formula
UpperCAmelCase__ : Dict = 42
# the largest integer less than b_estimate
UpperCAmelCase__ : int = 42
# the largest integer less than b_estimate
UpperCAmelCase__ : Any = 42
# the triangle number corresponding to b_floor
UpperCAmelCase__ : Tuple = 42
# the triangle number corresponding to b_ceil
UpperCAmelCase__ : List[Any] = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
UpperCAmelCase__ : Optional[int] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
UpperCAmelCase__ : Union[str, Any] = floor(__snake_case )
UpperCAmelCase__ : Tuple = ceil(__snake_case )
UpperCAmelCase__ : Optional[Any] = triangle_numbers[b_floor]
UpperCAmelCase__ : List[Any] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
UpperCAmelCase__ : Dict = triangle_b_first_guess * triangle_a
UpperCAmelCase__ : Optional[int] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
UpperCAmelCase__ : int = triangle_b_second_guess * triangle_a
UpperCAmelCase__ : List[str] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"""{solution() = }""")
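# Why the quadratic-formula estimate above works (sketch): an a x b grid of unit
# squares contains T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2 is the
# n-th triangle number; e.g. a 2 x 3 grid contains T(2) * T(3) = 3 * 6 = 18.
# Fixing T(a) and solving T(a) * b * (b + 1) / 2 = target for b gives
#     b = (-1 + sqrt(1 + 8 * target / T(a))) / 2,
# which is exactly `b_estimate`; checking floor(b) and ceil(b) then suffices to
# find the closest product for that value of a.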
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
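# The integration test above corresponds to this plain-usage pattern (a sketch,
# assuming the "albert-base-v2" checkpoint can be downloaded):
#
#     model = AlbertModel.from_pretrained("albert-base-v2")
#     outputs = model(input_ids, attention_mask=attention_mask)
#     # outputs[0] has shape (batch_size, sequence_length, hidden_size) == (1, 11, 768)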
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # Default to the "train" split unless the paths are given as a {split: paths} mapping.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
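# A minimal concrete reader built on the abstract base above. This is a sketch:
# `InMemoryListReader` is a hypothetical name used for illustration (the real
# library ships readers such as a JSON reader), and it assumes `Dataset.from_list`
# is available in the installed `datasets` version.
class InMemoryListReader(AbstractDatasetReader):
    def __init__(self, rows, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.rows = rows

    def read(self):
        # Materialize the in-memory rows, honoring the configured split name.
        return Dataset.from_list(self.rows, split=self.split)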
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False

    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
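# Illustration of handle_test_results on a typical pytest summary line (sketch):
# "== 2 failed, 31 passed in 91.43s ==" splits on spaces; the integers preceding
# "failed"/"passed" are summed, and since the last token contains "=", the token
# before it is taken as the time spent:
#
#     handle_test_results("== 2 failed, 31 passed in 91.43s ==") == (2, 31, "91.43s")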
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    r"""
    Constructs a Donut processor which wraps an image processor and a tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer as the processor used for processing targets."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON-like dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
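# Illustration of token2json's output shape (a sketch; the exact tags depend on
# the fine-tuned checkpoint's added vocabulary):
#
#     processor.token2json("<s_menu><s_name>Latte</s_name><s_price>5</s_price></s_menu>")
#     # -> {"menu": {"name": "Latte", "price": "5"}}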
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
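# Usage sketch for the config above (assumes `transformers` is installed):
#
#     config = DetrConfig(use_timm_backbone=False)  # builds a default ResNet "stage4" backbone config
#     assert config.to_dict()["backbone_config"]["model_type"] == "resnet"
#     assert config.num_attention_heads == config.encoder_attention_heads  # via attribute_map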
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase , """tf""" , 1_2 , **__UpperCamelCase )
@require_torch
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase , """pt""" , 1_2 , **__UpperCamelCase )
@require_torch
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
from transformers import BertModel
UpperCamelCase_ = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__UpperCamelCase ) )
vocab_file.flush()
UpperCamelCase_ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase_ = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase , """pt""" , 1_2 , __UpperCamelCase )
@require_tf
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase_ = self._test_export(__UpperCamelCase , """tf""" , 1_2 , **__UpperCamelCase )
UpperCamelCase_ = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase_ = self._test_export(__UpperCamelCase , """pt""" , 1_2 , **__UpperCamelCase )
UpperCamelCase_ = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase ):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase_ = Path(__UpperCamelCase ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
from transformers import BertModel
UpperCamelCase_ = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
UpperCamelCase_ = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , """pt""" )
@require_tf
@require_tokenizers
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
from transformers import TFBertModel
UpperCamelCase_ = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
UpperCamelCase_ = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , """tf""" )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = FeatureExtractionPipeline(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = infer_shapes(__UpperCamelCase , __UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] , __UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = ["""input_ids""", """attention_mask""", """token_type_ids"""]
UpperCamelCase_ = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
UpperCamelCase_ , UpperCamelCase_ = ensure_valid_input(FuncContiguousArgs() , __UpperCamelCase , __UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) , set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCamelCase_ , UpperCamelCase_ = ensure_valid_input(FuncNonContiguousArgs() , __UpperCamelCase , __UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) , 1 )
self.assertEqual(len(__UpperCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
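# What ensure_valid_input does, concretely (sketch): given a model whose forward
# signature is (input_ids, token_type_ids, attention_mask) and a dict of tokens,
# it returns the argument names reordered to match the signature together with
# the corresponding tensors, stopping at the first argument it cannot supply:
#
#     names, args = ensure_valid_input(model, tokens, ["input_ids", "attention_mask", "token_type_ids"])
#     # names == ["input_ids", "token_type_ids", "attention_mask"]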
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message by shifting each letter by the paired key letter."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text by applying the inverse shift."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
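# Worked example: the key is repeated to the message length; spaces pass through
# without consuming a key letter, and original_text applies the inverse shift.
assert generate_key("THE GERMAN ATTACK", "SECRET") == "SECRETSECRETSECRE"
_k = generate_key("THE GERMAN ATTACK", "SECRET")
assert original_text(cipher_text("THE GERMAN ATTACK", _k), _k) == "THE GERMAN ATTACK"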
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
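# How the lazy pattern above behaves (sketch): at import time the module object
# is swapped for a `_LazyModule`, so `from transformers.models.gpt_sw3 import
# GPTSw3Tokenizer` only triggers the real `tokenization_gpt_sw3` import (and its
# sentencepiece dependency) on first attribute access.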
def solution() -> int:
    """Return the product of digits d1 * d10 * d100 * ... * d1000000 of Champernowne's constant."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
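# Worked digits: the constant starts 123456789101112..., so d(1) = 1, d(10) = 1,
# d(100) = 5, d(1000) = 3, d(10000) = 7, d(100000) = 2, d(1000000) = 1, and the
# product returned above is 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.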
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet
def UpperCamelCase ( self, lowerCamelCase=False) -> Dict:
"""simple docstring"""
if class_cond:
_lowercase : Union[str, Any] = self.dummy_cond_unet
else:
_lowercase : Union[str, Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
_lowercase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, )
_lowercase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Tuple:
"""simple docstring"""
if str(lowerCamelCase).startswith('mps'):
_lowercase : str = torch.manual_seed(lowerCamelCase)
else:
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Optional[int] = self.get_dummy_components()
_lowercase : str = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : Dict = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : int = image[0, -3:, -3:, -1]
_lowercase : Dict = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase)
_lowercase : Any = ConsistencyModelPipeline(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_dummy_inputs(lowerCamelCase)
_lowercase : Any = 0
_lowercase : List[str] = pipe(**lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_onestep(self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """simple docstring"""
        generator = torch.manual_seed(seed)
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs['latents'] = latents
        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """simple docstring"""
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        """simple docstring"""
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        """simple docstring"""
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_fp16(self):
        """simple docstring"""
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_fp16_onestep(self):
        """simple docstring"""
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
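# A minimal end-to-end sketch assembled from the slow tests above (the checkpoint
# id and scheduler settings are the ones the tests use; one-step sampling shown):
#
#   unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#   pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#   image = pipe(num_inference_steps=1, output_type="np").images[0]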
| 21 | 1 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    '''simple docstring'''
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
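# Retrieval sketch (a hypothetical query; 128-d embeddings and inner-product
# search exactly as set up above):
#   q_rep = embed_questions_for_retrieval(["why is the sky blue?"], qar_tokenizer, qar_model)
#   scores, nn_ids = eli5_train_q_index.search(q_rep, 10)  # top-10 nearest training questions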
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    '''simple docstring'''
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    '''simple docstring'''
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    '''simple docstring'''
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options
    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 298 |
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
# Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
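    # Sanity check (a sketch): qiskit orders classical bits most-significant
    # first, so inputs (1, 1) give sum = 0 and carry = 1, i.e. every shot lands
    # in the '10' bucket.
    assert max(counts, key=counts.get) == "10"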
| 298 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
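# Usage sketch (the public re-export path is an assumption): the lazy module
# makes `from transformers import NllbMoeConfig` cheap, deferring the
# torch-heavy modeling file until a model class is actually accessed.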
| 275 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> Union[str, Any]:
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def lowercase__ ( self : Optional[int] ) -> int:
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.attentions
_lowerCAmelCase = sum(self.model_tester.depths )
self.assertEqual(len(__snake_case ) , __snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first attentions (first block, first layer)
_lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
_lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_lowerCAmelCase = (self.model_tester.image_size // 32) ** 2
_lowerCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
_lowerCAmelCase = len(__snake_case )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + 1 , len(__snake_case ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first attentions (first block, first layer)
_lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
_lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
    def test_training(self):
if not self.model_tester.is_training:
return
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__snake_case ):
continue
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.train()
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
_lowerCAmelCase = model(**__snake_case ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Tuple ) -> Dict:
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_image_segmentation_ade(self):
# only resize + normalize
_lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
_lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__snake_case )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
_lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
_lowerCAmelCase = model(__snake_case )
_lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , __snake_case )
_lowerCAmelCase = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) )
@slow
    def test_inference_image_segmentation_city(self):
# only resize + normalize
_lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
_lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__snake_case )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
_lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
_lowerCAmelCase = model(__snake_case )
_lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , __snake_case )
_lowerCAmelCase = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-1 ) )
@slow
    def test_post_processing_semantic_segmentation(self):
# only resize + normalize
_lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
_lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__snake_case )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
_lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
_lowerCAmelCase = model(__snake_case )
_lowerCAmelCase = outputs.logits.detach().cpu()
_lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(5_00, 3_00)] )
_lowerCAmelCase = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , __snake_case )
_lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case )
_lowerCAmelCase = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , __snake_case )
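# A minimal standalone inference sketch distilled from the slow tests above
# (the checkpoint id, fixture path, and post-processing call are the ones the
# tests use; other defaults are assumed):
#
#   from PIL import Image
#   from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
#
#   processor = SegformerImageProcessor()
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   # one (H, W) label map per image, resized to the requested target size
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]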
| 70 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Any = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
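# Usage sketch (a hypothetical instantiation; keyword names are the parameters
# defined above, and anything unspecified falls back to the defaults):
#   config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#   config.num_hidden_layers  # {'vision': 5, 'cross_encoder': 5, 'language': 9}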
| 251 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
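    # Expected for this test case (hand-checked): execution order P1(2), P3(3),
    # P2(5), P4(7); waiting times [0, 5, 2, 10] and turnaround times
    # [2, 10, 5, 17], so the averages printed above are 4.25000 and 8.50000.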
| 251 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
__A : int = TFRoFormerModel(config=_UpperCAmelCase)
__A : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__A : int = [input_ids, input_mask]
__A : Tuple = model(_UpperCAmelCase)
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
__A : Optional[Any] = True
__A : Any = TFRoFormerForCausalLM(config=_UpperCAmelCase)
__A : Union[str, Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape) , [self.batch_size, self.seq_length, self.vocab_size])
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
__A : Union[str, Any] = TFRoFormerForMaskedLM(config=_UpperCAmelCase)
__A : Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__A : Optional[int] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
__A : List[str] = self.num_labels
__A : Optional[int] = TFRoFormerForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__A : Tuple = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
__A : List[Any] = self.num_choices
__A : List[Any] = TFRoFormerForMultipleChoice(config=_UpperCAmelCase)
__A : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Any = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[str] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
__A : Any = self.num_labels
__A : Tuple = TFRoFormerForTokenClassification(config=_UpperCAmelCase)
__A : Tuple = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
__A : int = TFRoFormerForQuestionAnswering(config=_UpperCAmelCase)
__A : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__A : Optional[int] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_causal_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
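# Fill-mask usage sketch for the checkpoint exercised above (the pipeline task
# name is standard; the Chinese prompt is a made-up example):
#   from transformers import pipeline
#   fill = pipeline("fill-mask", model="junnyu/roformer_chinese_base", framework="tf")
#   fill("今天天气非常[MASK]。")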
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_basic(self):
        '''simple docstring'''
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        '''simple docstring'''
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        '''simple docstring'''
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
])
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
])
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
| 190 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
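# Example: all_rotations("abc") -> ['abc', 'bca', 'cab']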
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_a : List[str] =int(_UpperCAmelCase )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(_UpperCAmelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_a : Optional[int] =[""""""] * len(_UpperCAmelCase )
for _ in range(len(_UpperCAmelCase ) ):
for i in range(len(_UpperCAmelCase ) ):
_a : int =bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
A__: Any = '''Provide a string that I will generate its BWT transform: '''
A__: Union[str, Any] = input(entry_msg).strip()
A__: Optional[int] = bwt_transform(s)
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
A__: Union[str, Any] = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
)
| 276 | 0 |
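As a quick sanity check of the two functions above (assuming they are importable), the classic "^BANANA" example round-trips:

result = bwt_transform("^BANANA")
assert result["bwt_string"] == "BNN^AAA"            # last column of the sorted rotations
assert result["idx_original_string"] == 6           # where the original string landed
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"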
import datasets
from .evaluate import evaluate
lowerCAmelCase__ :List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
lowerCAmelCase__ :Any = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
lowerCAmelCase__ :int = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        # map each question id to its list of candidate answer texts
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # rebuild the references in the SQuAD-style layout the scoring script expects
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 359 |
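A hedged sketch of how predictions are typically shaped before being passed in: CUAD expects, per question id, the list of candidate answer spans kept above some confidence threshold. The names and values below are illustrative, not part of the metric:

# hypothetical raw model output: (span_text, confidence) pairs per question id
raw = {"q1": [("The seller:", 0.91), ("the Buyer", 0.42)]}

threshold = 0.5
predictions = [
    {"id": qid, "prediction_text": [text for text, conf in spans if conf >= threshold]}
    for qid, spans in raw.items()
]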
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set the given module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"

        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".

        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 185 | 0 |
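A minimal usage sketch for the patcher above, assuming the two classes are importable; SimpleNamespace stands in for a module that did `import os`:

import os
from types import SimpleNamespace

calling_module = SimpleNamespace(os=os)

def mock_join(*parts):
    return "mock:" + "/".join(parts)

with patch_submodule(calling_module, "os.path.join", mock_join):
    assert calling_module.os.path.join("a", "b") == "mock:a/b"           # patched
assert calling_module.os.path.join("a", "b") == os.path.join("a", "b")   # restored on exit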
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
| 325 |
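A small shape-check sketch for the model above, assuming the block classes it imports are available; the reduced widths are illustrative and keep the init cheap:

import jax

unet = FlaxUNet2DConditionModel(
    sample_size=16,
    block_out_channels=(32, 64, 64, 64),
    attention_head_dim=8,
    cross_attention_dim=32,
)
params = unet.init_weights(jax.random.PRNGKey(0))  # FrozenDict of initialized weights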
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 325 | 1 |
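The init above follows the standard transformers lazy-import pattern: nothing heavy is imported until an attribute is first accessed. Conceptually it reduces to a module-level __getattr__, roughly:

import importlib

def __getattr__(name):  # PEP 562: called only for attributes not found normally
    if name == "BartphoTokenizer":
        return importlib.import_module(".tokenization_bartpho", __name__).BartphoTokenizer
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")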
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE : Optional[int] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], 
[489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCamelCase__ )
self.assertListEqual(encoding.boxes , UpperCamelCase__ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
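# A hedged usage sketch for the processor tested above (requires pytesseract for OCR;
# the image path below is illustrative):
#
#     from PIL import Image
#     from transformers import LayoutLMv3ImageProcessor
#
#     processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#     encoding = processor(Image.open("contract_page.png").convert("RGB"), return_tensors="pt")
#     print(encoding.pixel_values.shape, encoding.words, encoding.boxes)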
| 258 | import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
| 258 | 1 |
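A quick usage sketch for the tokenizer above; the vocabulary content is illustrative, not the real ESM-2 vocab file:

import os
import tempfile

tokens = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "<mask>"]
with tempfile.TemporaryDirectory() as tmp:
    vocab_path = os.path.join(tmp, "vocab.txt")
    with open(vocab_path, "w") as f:
        f.write("\n".join(tokens))
    tokenizer = EsmTokenizer(vocab_file=vocab_path)
    print(tokenizer.tokenize("L A G"))  # ['L', 'A', 'G'] -- plain whitespace tokenization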
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    # a sample audio file hosted on the Hub for testing purposes
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 96 | """simple docstring"""
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 98 | 0 |
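A dry run of the updater above against a scratch file, assuming it runs in the same module so the path constant can be redirected:

import pathlib

scratch = pathlib.Path("custom_test.js")
scratch.write_text(
    'const stableVersion = "v1.0.0"\n'
    "const versionMapping = {\n"
    '    "v1.0.0": "v1.0.0",\n'
    "}\n"
)
CUSTOM_JS_FILE = str(scratch)  # point the module constant at the scratch file
update_custom_js("1.1.0")
print(scratch.read_text())  # stable version bumped, "v1.1.0" added to the mapping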
"""simple docstring"""
from maths.prime_check import is_prime
def __lowercase ( _a ):
if not isinstance(_a , _a ):
snake_case_ : List[Any] = f"Input value of [number={number}] must be an integer"
raise TypeError(_a )
if is_prime(_a ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 |
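For instance (assuming the function above and its is_prime dependency are importable):

print(twin_prime(5))   # 7, since 5 and 7 are both prime
print(twin_prime(4))   # -1, 4 is not prime
print(twin_prime(13))  # -1, 15 = 13 + 2 is not prime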
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : List[Any] , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[Any] , ):
super().__init__(
lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , )
snake_case_ : List[Any] = path_or_paths if isinstance(lowercase_ , lowercase_ ) else {self.split: path_or_paths}
snake_case_ : str = Text(
cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , **lowercase_ , )
def _snake_case ( self : Any ):
# Build iterable dataset
if self.streaming:
snake_case_ : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
snake_case_ : List[Any] = None
snake_case_ : Optional[Any] = None
snake_case_ : str = None
snake_case_ : Optional[int] = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , )
snake_case_ : Union[str, Any] = self.builder.as_dataset(
split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory )
return dataset
| 155 | 1 |
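A minimal sketch of reading a plain-text file with the class above, assuming a working datasets environment (it mirrors what datasets does under the hood for load_dataset("text", ...)):

with open("notes.txt", "w") as f:
    f.write("first line\nsecond line\n")

ds = TextDatasetReader("notes.txt", split="train").read()
print(ds[0])  # {'text': 'first line'}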