"""Mean IoU (Intersection-over-Union) metric."""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
    >>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels: int, ignore_index: int, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Calculate intersection and union between a predicted and a ground truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels: int, ignore_index: int, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Accumulate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Calculate mean Intersection-over-Union (mIoU), mean accuracy and overall accuracy."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
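

# Illustrative sketch (ours, not part of the original metric): a tiny end-to-end check
# of the histogram-based area computation above. The arrays below are made up.
if __name__ == "__main__":
    demo_pred = np.array([[0, 1], [1, 1]])
    demo_label = np.array([[0, 1], [0, 1]])
    inter, union, _, _ = intersect_and_union(demo_pred, demo_label, num_labels=2, ignore_index=255)
    # class 0: intersection 1 / union 2 = 0.5; class 1: intersection 2 / union 3 ~= 0.67
    print(inter / union)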
"""Tests for the MBart-50 tokenizers (Python and Rust)."""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
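

# Hedged usage sketch (ours, not part of the test suite): the translation flow these
# tests exercise, on the public checkpoint named above. The helper name
# `_demo_translate_en_to_ro` is an assumption; the rest follows the documented MBart-50 API.
def _demo_translate_en_to_ro():
    from transformers import MBart50Tokenizer, MBartForConditionalGeneration

    tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
    model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
    batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # the forced BOS token selects the target language, as in test_tokenizer_translation above
    generated = model.generate(**batch, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
    return tokenizer.batch_decode(generated, skip_special_tokens=True)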
"""Check that the TensorFlow ops used in a saved model are supported by ONNX."""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
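
# Example invocation (illustrative; the file name utils/check_tf_ops.py is an assumption,
# the flags are the ones defined by the parser above):
#
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict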
"""Count primes below a bound that are the difference of two consecutive cubes."""
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to the square root of ``number``."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` of the form (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # step to the next consecutive-cube difference
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
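
# Quick sanity check (ours, not in the original): the consecutive-cube differences below
# 100 are 7, 19, 37, 61, 91; all but 91 = 7 * 13 are prime, so solution(100) == 4.
assert solution(100) == 4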
"""Fine-tune a sequence-to-sequence model (summarization or translation) with PyTorch Lightning."""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
_lowerCAmelCase : Dict = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(snake_case__ , num_labels=snake_case__ , mode=self.mode , **snake_case__ )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
_lowerCAmelCase : Tuple = Path(self.output_dir ) / 'metrics.json'
_lowerCAmelCase : List[Any] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : List[str] = defaultdict(snake_case__ )
_lowerCAmelCase : Any = self.config.model_type
_lowerCAmelCase : Optional[Any] = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
_lowerCAmelCase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_lowerCAmelCase : str = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
_lowerCAmelCase : int = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_lowerCAmelCase : List[str] = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_lowerCAmelCase : Dict = get_git_info()['repo_sha']
_lowerCAmelCase : Tuple = hparams.num_workers
_lowerCAmelCase : Dict = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , snake_case__ ):
_lowerCAmelCase : int = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_lowerCAmelCase : List[Any] = self.decoder_start_token_id
_lowerCAmelCase : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Union[str, Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_lowerCAmelCase : Union[str, Any] = self.hparams.eval_max_gen_length
else:
_lowerCAmelCase : int = self.model.config.max_length
_lowerCAmelCase : Union[str, Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(snake_case__ , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
_lowerCAmelCase : str = True
return readable_batch
def a ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.model(snake_case__ , **snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
return lmap(str.strip , snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.tokenizer.pad_token_id
_lowerCAmelCase , _lowerCAmelCase : List[str] = batch['input_ids'], batch['attention_mask']
_lowerCAmelCase : List[Any] = batch['labels']
if isinstance(self.model , snake_case__ ):
_lowerCAmelCase : Dict = self.model._shift_right(snake_case__ )
else:
_lowerCAmelCase : Tuple = shift_tokens_right(snake_case__ , snake_case__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_lowerCAmelCase : int = decoder_input_ids
self.save_readable_batch(snake_case__ )
_lowerCAmelCase : int = self(snake_case__ , attention_mask=snake_case__ , decoder_input_ids=snake_case__ , use_cache=snake_case__ )
_lowerCAmelCase : str = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_lowerCAmelCase : List[Any] = nn.CrossEntropyLoss(ignore_index=snake_case__ )
assert lm_logits.shape[-1] == self.vocab_size
_lowerCAmelCase : Tuple = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
_lowerCAmelCase : List[str] = nn.functional.log_softmax(snake_case__ , dim=-1 )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = label_smoothed_nll_loss(
snake_case__ , snake_case__ , self.hparams.label_smoothing , ignore_index=snake_case__ )
return (loss,)
@property
def a ( self ):
'''simple docstring'''
return self.tokenizer.pad_token_id
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self._step(snake_case__ )
_lowerCAmelCase : Optional[Any] = dict(zip(self.loss_names , snake_case__ ) )
# tokens per batch
_lowerCAmelCase : List[str] = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
_lowerCAmelCase : Union[str, Any] = batch['input_ids'].shape[0]
_lowerCAmelCase : Any = batch['input_ids'].eq(self.pad ).sum()
_lowerCAmelCase : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return self._generative_step(snake_case__ )
def a ( self , snake_case__ , snake_case__="val" ):
'''simple docstring'''
self.step_count += 1
_lowerCAmelCase : Any = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_lowerCAmelCase : Optional[int] = losses['loss']
_lowerCAmelCase : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
_lowerCAmelCase : int = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_lowerCAmelCase : torch.FloatTensor = torch.tensor(snake_case__ ).type_as(snake_case__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(snake_case__ )
_lowerCAmelCase : Optional[Any] = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
_lowerCAmelCase : Optional[Any] = self.step_count
self.metrics[prefix].append(snake_case__ ) # callback writes this to self.metrics_save_path
_lowerCAmelCase : str = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return calculate_rouge(snake_case__ , snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_lowerCAmelCase : Tuple = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=snake_case__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_lowerCAmelCase : List[Any] = (time.time() - ta) / batch['input_ids'].shape[0]
_lowerCAmelCase : List[str] = self.ids_to_clean_text(snake_case__ )
_lowerCAmelCase : List[str] = self.ids_to_clean_text(batch['labels'] )
_lowerCAmelCase : List[Any] = self._step(snake_case__ )
_lowerCAmelCase : Dict = dict(zip(self.loss_names , snake_case__ ) )
_lowerCAmelCase : Dict = self.calc_generative_metrics(snake_case__ , snake_case__ )
_lowerCAmelCase : Tuple = np.mean(lmap(snake_case__ , snake_case__ ) )
base_metrics.update(gen_time=snake_case__ , gen_len=snake_case__ , preds=snake_case__ , target=snake_case__ , **snake_case__ )
return base_metrics
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return self._generative_step(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return self.validation_epoch_end(snake_case__ , prefix='test' )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.n_obs[type_path]
_lowerCAmelCase : List[str] = self.target_lens[type_path]
_lowerCAmelCase : str = self.dataset_class(
self.tokenizer , type_path=snake_case__ , n_obs=snake_case__ , max_target_length=snake_case__ , **self.dataset_kwargs , )
return dataset
def a ( self , snake_case__ , snake_case__ , snake_case__ = False ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_dataset(snake_case__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_lowerCAmelCase : Optional[int] = dataset.make_sortish_sampler(snake_case__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
snake_case__ , batch_size=snake_case__ , collate_fn=dataset.collate_fn , shuffle=snake_case__ , num_workers=self.num_workers , sampler=snake_case__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_lowerCAmelCase : int = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
snake_case__ , batch_sampler=snake_case__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
snake_case__ , batch_size=snake_case__ , collate_fn=dataset.collate_fn , shuffle=snake_case__ , num_workers=self.num_workers , sampler=snake_case__ , )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=snake_case__ )
return dataloader
def a ( self ):
'''simple docstring'''
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def a ( self ):
'''simple docstring'''
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def a ( snake_case__ , snake_case__ ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(snake_case__ , snake_case__ )
add_generic_args(snake_case__ , snake_case__ )
parser.add_argument(
'--max_source_length' , default=1024 , type=snake_case__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=56 , type=snake_case__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=142 , type=snake_case__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=142 , type=snake_case__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=snake_case__ )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=snake_case__ )
parser.add_argument('--max_tokens_per_batch' , type=snake_case__ , default=snake_case__ )
parser.add_argument('--logger_name' , type=snake_case__ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=snake_case__ , default=-1 , required=snake_case__ , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=snake_case__ , default=500 , required=snake_case__ , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=snake_case__ , default=-1 , required=snake_case__ , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=snake_case__ , default='summarization' , required=snake_case__ , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=snake_case__ , default=0.0 , required=snake_case__ )
parser.add_argument('--src_lang' , type=snake_case__ , default='' , required=snake_case__ )
parser.add_argument('--tgt_lang' , type=snake_case__ , default='' , required=snake_case__ )
parser.add_argument('--eval_beams' , type=snake_case__ , default=snake_case__ , required=snake_case__ )
parser.add_argument(
'--val_metric' , type=snake_case__ , default=snake_case__ , required=snake_case__ , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=snake_case__ , default=snake_case__ , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=snake_case__ , default=1 , required=snake_case__ , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=snake_case__ , default=-1 , required=snake_case__ , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser


class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None):
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
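
# Illustrative invocation (paths and model are placeholders; --model_name_or_path,
# --do_train and --do_predict come from the generic args added by lightning_base):
#
#   python finetune.py \
#       --model_name_or_path t5-small \
#       --data_dir $DATA_DIR \
#       --output_dir $OUTPUT_DIR \
#       --do_train --do_predict \
#       --task summarization \
#       --max_source_length 1024 --max_target_length 56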
"""MVP model configuration."""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
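
# Minimal usage sketch (ours, not part of the original file): building a randomly
# initialized MVP model from this configuration, in the style of transformers config docstrings.
#
#   from transformers import MvpConfig, MvpModel
#
#   configuration = MvpConfig()
#   model = MvpModel(configuration)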
"""Convert DPT checkpoints from the original repository. URL: https://github.com/isl-org/DPT"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
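

# Illustrative check (ours, not in the original script): timm stores q, k and v stacked
# row-wise in one (3 * hidden_size, hidden_size) matrix; the slices above unpack them.
def _demo_qkv_split(hidden_size=4):
    fused = torch.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)
    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value]), fused)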


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
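
# Example invocation (illustrative; the script file name is an assumption, the flags are
# the ones defined above, and the default checkpoint URL points at the DPT-Large weights):
#
#   python convert_dpt_to_pytorch.py --pytorch_dump_folder_path ./dpt-large --model_name dpt-large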
"""Convert LLaMA weights from the original format to the Hugging Face Transformers format."""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
lowerCAmelCase : Optional[int] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowercase (_A , _A=1 , _A=2_5_6 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
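# Editorial sanity check (not part of the original script): for the 7B model,
# params.json has n=4096 and the defaults above (ffn_dim_multiplier=1,
# multiple_of=256), so the formula gives
# 256 * ((int(1 * int(8 * 4096 / 3)) + 255) // 256) = 256 * 43 = 11008,
# matching the "7B" entry in the intermediate-size table at the top.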
def lowercase (_A ):
"""simple docstring"""
with open(_A , 'r' ) as f:
return json.load(_A )
def lowercase (_A , _A ):
"""simple docstring"""
with open(_A , 'w' ) as f:
json.dump(_A , _A )
def lowercase (_A , _A , _A , _A=True ):
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' )
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) )
_lowerCAmelCase : List[str] = NUM_SHARDS[model_size]
_lowerCAmelCase : str = params['n_layers']
_lowerCAmelCase : Optional[int] = params['n_heads']
_lowerCAmelCase : int = n_heads // num_shards
_lowerCAmelCase : Optional[int] = params['dim']
_lowerCAmelCase : Union[str, Any] = dim // n_heads
_lowerCAmelCase : Union[str, Any] = 10_000.0
_lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
_lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase : Union[str, Any] = n_heads
_lowerCAmelCase : Any = n_heads_per_shard
_lowerCAmelCase : Optional[Any] = dim
# permute for sliced rotary
def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
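# Editorial note: this reshape converts Meta's interleaved rotary layout
# (adjacent dimension pairs rotated together) into the half-split layout
# that the Transformers LLaMA attention implementation expects.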
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
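# Editorial note: the concatenations above stitch tensor-parallel shards back
# together - wq/wk/wv and w1/w3 are sharded along their rows (cat along dim 0),
# while the output projections wo and w2 are sharded along their columns
# (cat along dim 1).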
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.float16 , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase : List[Any] = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
| 25 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Any = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "visual_bert"
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=512 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Any = visual_embedding_dim
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : int = bypass_transformer
_lowerCAmelCase : Union[str, Any] = special_visual_initialize
| 25 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
| 25 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "vit_mae"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , snake_case__=512 , snake_case__=8 , snake_case__=2048 , snake_case__=0.75 , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Dict = layer_norm_eps
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Tuple = qkv_bias
_lowerCAmelCase : Tuple = decoder_num_attention_heads
_lowerCAmelCase : Union[str, Any] = decoder_hidden_size
_lowerCAmelCase : int = decoder_num_hidden_layers
_lowerCAmelCase : List[Any] = decoder_intermediate_size
_lowerCAmelCase : Any = mask_ratio
_lowerCAmelCase : Optional[int] = norm_pix_loss
| 25 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 1 |
'''simple docstring'''
lowerCAmelCase : int = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowerCAmelCase : Optional[int] = frozenset(["""prompt""", """negative_prompt"""])
lowerCAmelCase : Optional[int] = frozenset([])
lowerCAmelCase : int = frozenset(["""image"""])
lowerCAmelCase : Dict = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCAmelCase : Optional[int] = frozenset(["""image"""])
lowerCAmelCase : List[Any] = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowerCAmelCase : List[Any] = frozenset(["""prompt""", """image""", """negative_prompt"""])
lowerCAmelCase : List[str] = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowerCAmelCase : Dict = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
lowerCAmelCase : List[str] = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCAmelCase : Any = frozenset(["""image""", """mask_image"""])
lowerCAmelCase : Any = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCAmelCase : str = frozenset(["""example_image""", """image""", """mask_image"""])
lowerCAmelCase : Dict = frozenset(["""class_labels"""])
lowerCAmelCase : Any = frozenset(["""class_labels"""])
lowerCAmelCase : str = frozenset(["""batch_size"""])
lowerCAmelCase : Dict = frozenset([])
lowerCAmelCase : str = frozenset(["""batch_size"""])
lowerCAmelCase : Any = frozenset([])
lowerCAmelCase : int = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowerCAmelCase : List[Any] = frozenset(["""prompt""", """negative_prompt"""])
lowerCAmelCase : Dict = frozenset(["""input_tokens"""])
lowerCAmelCase : str = frozenset(["""input_tokens"""])
| 25 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 1 |
'''simple docstring'''
import functools
def lowercase (word1 , word2 ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = len(word1 )
_lowerCAmelCase : List[str] = len(word2 )
@functools.cache
def min_distance(index1 , index2 ) -> int:
# if the first word is exhausted - delete the rest of the second word
if index1 >= len_word1:
return len_word2 - index2
# if the second word is exhausted - delete the rest of the first word
if index2 >= len_word2:
return len_word1 - index1
_lowerCAmelCase : Any = int(word1[index1] != word2[index2] ) # current letters not identical
return min(
1 + min_distance(index1 + 1 , index2 ) , 1 + min_distance(index1 , index2 + 1 ) , diff + min_distance(index1 + 1 , index2 + 1 ) , )
return min_distance(0 , 0 )
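# Editorial check (not in the original): the classic example
# lowercase("intention", "execution") returns 5, the Levenshtein distance
# between the two words.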
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
| 25 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowercase (_A , _A ):
"""simple docstring"""
assert isinstance(_A , _A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowercase (_A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : str = tmp_path / 'cache'
_lowerCAmelCase : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase : Optional[int] = SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=_A , keep_in_memory=_A ).read()
_check_sql_dataset(_A , _A )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowercase (_A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Dict = tmp_path / 'cache'
_lowerCAmelCase : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowerCAmelCase : Tuple = features.copy() if features else default_expected_features
_lowerCAmelCase : Optional[Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase : List[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=_A , cache_dir=_A ).read()
_check_sql_dataset(_A , _A )
def lowercase (_A ):
"""simple docstring"""
with contextlib.closing(sqlite3.connect(_A ) ) as con:
_lowerCAmelCase : Any = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = tmp_path / 'cache'
_lowerCAmelCase : Union[str, Any] = os.path.join(_A , 'tmp.sql' )
_lowerCAmelCase : Any = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_A ).read()
SqlDatasetWriter(_A , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
_lowerCAmelCase : int = iter_sql_file(_A )
_lowerCAmelCase : List[str] = iter_sql_file(_A )
for row1, row2 in zip(_A , _A ):
assert row1 == row2
@require_sqlalchemy
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : str = tmp_path / 'cache'
_lowerCAmelCase : Any = os.path.join(_A , 'tmp.sql' )
_lowerCAmelCase : int = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_A ).read()
SqlDatasetWriter(_A , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
_lowerCAmelCase : Any = iter_sql_file(_A )
_lowerCAmelCase : List[Any] = iter_sql_file(_A )
for row1, row2 in zip(_A , _A ):
assert row1 == row2
@require_sqlalchemy
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = tmp_path / 'cache'
_lowerCAmelCase : Optional[int] = os.path.join(_A , 'tmp.sql' )
_lowerCAmelCase : List[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_A ).read()
with pytest.raises(_A ):
SqlDatasetWriter(_A , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
| 25 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
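# Editorial note: RoBERTa does not use token type ids, so the mask above is all
# zeros whether or not a second sequence is provided; its length only accounts
# for the special tokens wrapped around each sequence.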
| 25 | 1 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
lowerCAmelCase : List[str] = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
lowerCAmelCase : Any = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
lowerCAmelCase : Any = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def a ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_lowerCAmelCase : Optional[Any] = np.array([re.sub(snake_case__ , '' , snake_case__ ) for x in predictions] )
_lowerCAmelCase : Tuple = np.array([re.sub(snake_case__ , '' , snake_case__ ) for x in references] )
else:
_lowerCAmelCase : str = np.asarray(snake_case__ )
_lowerCAmelCase : Any = np.asarray(snake_case__ )
if ignore_case:
_lowerCAmelCase : Dict = np.char.lower(snake_case__ )
_lowerCAmelCase : List[Any] = np.char.lower(snake_case__ )
if ignore_punctuation:
_lowerCAmelCase : Optional[Any] = string.punctuation.maketrans('' , '' , string.punctuation )
_lowerCAmelCase : Dict = np.char.translate(snake_case__ , table=snake_case__ )
_lowerCAmelCase : Tuple = np.char.translate(snake_case__ , table=snake_case__ )
if ignore_numbers:
_lowerCAmelCase : Optional[Any] = string.digits.maketrans('' , '' , string.digits )
_lowerCAmelCase : Dict = np.char.translate(snake_case__ , table=snake_case__ )
_lowerCAmelCase : int = np.char.translate(snake_case__ , table=snake_case__ )
_lowerCAmelCase : int = predictions == references
return {"exact_match": np.mean(snake_case__ ) * 100}
| 25 |
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag.
lowerCAmelCase : Optional[int] = 1 # The second color of the flag.
lowerCAmelCase : int = 2 # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)
def lowercase (_A ):
"""simple docstring"""
if not sequence:
return []
if len(_A ) == 1:
return list(_A )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = len(_A ) - 1
_lowerCAmelCase : Optional[Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
high -= 1
else:
_lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contain only {colors} values'
raise ValueError(_A )
return sequence
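# Editorial check (not in the original): the sort above maps
# [2, 0, 1, 1, 0, 2] to [0, 0, 1, 1, 2, 2] in a single pass with O(1) extra space.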
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 25 | 1 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowercase (_A ):
"""simple docstring"""
return (data["data"], data["target"])
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = XGBClassifier()
classifier.fit(_A , _A )
return classifier
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Tuple = load_iris()
_lowerCAmelCase , _lowerCAmelCase : List[str] = data_handling(_A )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = train_test_split(
_A , _A , test_size=0.25 )
_lowerCAmelCase : Tuple = iris['target_names']
# Create an XGBoost Classifier from the training data
_lowerCAmelCase : Optional[Any] = xgboost(_A , _A )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_A , _A , _A , display_labels=_A , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 |
'''simple docstring'''
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
_lowerCAmelCase : int = 6
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Optional[int] = 1_9_0_1
_lowerCAmelCase : Optional[Any] = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
_lowerCAmelCase : List[str] = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
if month > 1_2:
year += 1
_lowerCAmelCase : Optional[int] = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
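# Editorial note: this brute-force walk over 1901-2000 is Project Euler
# problem 19; the published answer (and expected return value) is 171.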
if __name__ == "__main__":
print(solution())
| 25 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Any = {"""vocab_file""": """vocab.txt"""}
lowerCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
lowerCAmelCase : Tuple = {
"""openbmb/cpm-ant-10b""": 10_24,
}
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = collections.OrderedDict()
with open(_A , 'r' , encoding='utf-8' ) as reader:
_lowerCAmelCase : List[str] = reader.readlines()
for index, token in enumerate(_A ):
_lowerCAmelCase : Tuple = token.rstrip('\n' )
_lowerCAmelCase : Tuple = index
return vocab
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__="<unk>" , snake_case__=200 ):
'''simple docstring'''
_lowerCAmelCase : Any = vocab
_lowerCAmelCase : Any = unk_token
_lowerCAmelCase : Dict = max_input_chars_per_word
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
_lowerCAmelCase : Any = 0
_lowerCAmelCase : List[Any] = []
while start < len(snake_case__ ):
_lowerCAmelCase : Any = len(snake_case__ )
_lowerCAmelCase : str = None
while start < end:
_lowerCAmelCase : Tuple = ''.join(chars[start:end] )
if substr in self.vocab:
_lowerCAmelCase : Dict = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
_lowerCAmelCase : Any = end
return sub_tokens
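# Editorial example (hypothetical vocab): with vocab {"ab": 0, "abc": 1, "d": 2},
# tokenizing "abcd" yields ["abc", "d"] - the longest prefix present in the
# vocab is matched first, and unmatched single characters fall back to the
# unk token.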
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = False
def __init__( self , snake_case__ , snake_case__="<d>" , snake_case__="</d>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="<unk>" , snake_case__="</n>" , snake_case__="</_>" , snake_case__="left" , **snake_case__ , ):
'''simple docstring'''
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
_lowerCAmelCase : Optional[int] = bod_token
_lowerCAmelCase : Tuple = eod_token
_lowerCAmelCase : Optional[Any] = load_vocab(snake_case__ )
_lowerCAmelCase : Dict = self.encoder[space_token]
_lowerCAmelCase : Any = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_lowerCAmelCase : List[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Tuple = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def a ( self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def a ( self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def a ( self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def a ( self ):
'''simple docstring'''
return len(self.encoder )
def a ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def a ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = [i for i in token_ids if i >= 0]
_lowerCAmelCase : Optional[Any] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return token in self.encoder
def a ( self , snake_case__ ):
'''simple docstring'''
return "".join(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def a ( self , snake_case__ ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if os.path.isdir(snake_case__ ):
_lowerCAmelCase : List[Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
_lowerCAmelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
_lowerCAmelCase : Tuple = 0
if " " in self.encoder:
_lowerCAmelCase : str = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
_lowerCAmelCase : Union[str, Any] = self.encoder['\n']
del self.encoder["\n"]
_lowerCAmelCase : List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(snake_case__ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
_lowerCAmelCase : Optional[Any] = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_1 is None:
return [self.bos_token_id] + token_ids_0
return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=snake_case__ , token_ids_1=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
return [1] + ([0] * len(token_ids_0 ))
| 25 |
'''simple docstring'''
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
_lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
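# Editorial check (not in the original): solution(10) returns 31, the number
# of reduced proper fractions n/d with d <= 10 (Project Euler problem 72).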
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25 | 1 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = order
# a_{0} ... a_{k}
_lowerCAmelCase : List[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_lowerCAmelCase : str = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_lowerCAmelCase : List[Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
_lowerCAmelCase : Optional[int] = [0.0] * self.order
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
if len(snake_case__ ) < self.order:
_lowerCAmelCase : str = [1.0, *a_coeffs]
if len(snake_case__ ) != self.order + 1:
_lowerCAmelCase : Dict = (
F'Expected a_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case__ )}'
)
raise ValueError(snake_case__ )
if len(snake_case__ ) != self.order + 1:
_lowerCAmelCase : Tuple = (
F'Expected b_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case__ )}'
)
raise ValueError(snake_case__ )
_lowerCAmelCase : Optional[Any] = a_coeffs
_lowerCAmelCase : Union[str, Any] = b_coeffs
def a ( self , snake_case__ ):
'''simple docstring'''
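# Editorial note: this implements the direct-form I difference equation
#   y[n] = (b0*x[n] + b1*x[n-1] + ... + bk*x[n-k]
#           - a1*y[n-1] - ... - ak*y[n-k]) / a0
# using the stored input/output histories.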
_lowerCAmelCase : Optional[Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_lowerCAmelCase : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_lowerCAmelCase : int = self.input_history[:-1]
_lowerCAmelCase : Optional[Any] = self.output_history[:-1]
_lowerCAmelCase : Tuple = sample
_lowerCAmelCase : str = result
return result
| 25 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":` and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
def noop(x ):
return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
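# Editorial example (not in the original):
# sort_objects(["b_func", "CONSTANT", "MyClass"]) returns
# ["CONSTANT", "MyClass", "b_func"] - constants first, then classes, then
# functions, each group sorted case-insensitively with underscores ignored.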
def lowercase (_A ):
"""simple docstring"""
def _replace(match ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Dict = sort_objects(_A , key=lambda x : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
_lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(main_blocks ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(file , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(main_blocks ) )
def sort_imports_in_all_inits (check_only=True ):
"""simple docstring"""
failures = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
result = sort_imports(os.path.join(root , '__init__.py' ) , check_only=check_only )
if result:
failures = [os.path.join(root , '__init__.py' )]
if len(failures ) > 0:
raise ValueError(f'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 25 | 1 |
'''simple docstring'''
import math
import unittest
def is_prime (number ):
"""simple docstring"""
assert isinstance(number , int ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
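# Worked example: for number = 97, sqrt(97) ~ 9.8, so the loop above only
# tests the 6k +/- 1 candidates 5 and 7 (i and i + 2 for i = 5); neither
# divides 97, hence 97 is reported prime.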
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def a ( self ):
'''simple docstring'''
with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
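# 9 = 4 latent channels + 4 masked-image latent channels + 1 mask channel,
# matching the movq latent_channels of 4 configured below.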
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uint8(snake_case__ ) ).convert('RGB' ).resize((256, 256) )
# create mask
mask = np.ones((64, 64) , dtype=np.float32 )
mask[:32, :32] = 0
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
mask = np.ones((768, 768) , dtype=np.float32 )
mask[:250, 250:-250] = 0
_lowerCAmelCase : List[str] = 'a hat'
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
_lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 25 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = LEDTokenizer
__magic_name__ = LEDTokenizerFast
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Optional[int] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_lowerCAmelCase : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
_lowerCAmelCase : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowerCAmelCase : str = {'unk_token': '<unk>'}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def a ( self ):
'''simple docstring'''
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def a ( self ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCAmelCase : Optional[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Dict = tokenizer(snake_case__ , max_length=len(snake_case__ ) , padding=snake_case__ , return_tensors='pt' )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_lowerCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Dict = tokenizer(snake_case__ , padding=snake_case__ , return_tensors='pt' )
self.assertIn('input_ids' , snake_case__ )
self.assertIn('attention_mask' , snake_case__ )
self.assertNotIn('labels' , snake_case__ )
self.assertNotIn('decoder_attention_mask' , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : int = tokenizer(text_target=snake_case__ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def a ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : List[Any] = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
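# 'I am a small frog' tokenizes to 5 tokens, so 1024 repeats give 5120
# tokens, plus the <s> and </s> special tokens: the 5122 asserted above.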
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['A long paragraph for summarization.']
_lowerCAmelCase : str = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : int = tokenizer(snake_case__ , return_tensors='pt' )
_lowerCAmelCase : List[Any] = tokenizer(text_target=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : str = inputs['input_ids']
_lowerCAmelCase : int = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def a ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : int = ['Summary of the text.', 'Another summary.']
_lowerCAmelCase : Dict = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowerCAmelCase : List[Any] = tokenizer(snake_case__ , padding=snake_case__ )
_lowerCAmelCase : Tuple = [[0] * len(x ) for x in encoded_output['input_ids']]
_lowerCAmelCase : Optional[Any] = tokenizer.pad(snake_case__ )
self.assertSequenceEqual(outputs['global_attention_mask'] , snake_case__ )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : List[str] = 'A, <mask> AllenNLP sentence.'
_lowerCAmelCase : Union[str, Any] = tokenizer_r.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
_lowerCAmelCase : Dict = tokenizer_p.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_lowerCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowerCAmelCase : str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 25 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowercase (postfix_notation ):
"""simple docstring"""
if not postfix_notation:
return 0
operations = {'+', '-', '*', '/'}
stack : list[Any] = []
for token in postfix_notation:
if token in operations:
b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
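# Emulate integer division that truncates toward zero (C-style),
# since Python's // operator floors toward negative infinity.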
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(token ) )
return stack.pop()
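# Worked example (illustrative): lowercase(['2', '1', '+', '3', '*']) == 9;
# '+' pops 1 then 2 and pushes 3, '*' then pops 3 and 3 and pushes 9.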
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
'''simple docstring'''
def actual_power (a , b ):
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
else:
return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power (a , b ):
"""simple docstring"""
if b < 0:
return 1 / actual_power(a , b )
return actual_power(a , b )
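# A hedged alternative (illustrative, not part of the original module): the
# recursion above evaluates actual_power twice per level, recomputing the
# same half-power. Binding the half once keeps the O(log b) multiply count.
def _fast_power(a, b):
    # Assumes b >= 0: exponentiation by squaring with one recursive call.
    if b == 0:
        return 1
    half = _fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half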
if __name__ == "__main__":
print(power(-2, -3))
| 25 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
__magic_name__ = "mobilenet_v2"
def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : List[Any] = depth_multiplier
_lowerCAmelCase : List[Any] = depth_divisible_by
_lowerCAmelCase : Optional[Any] = min_depth
_lowerCAmelCase : str = expand_ratio
_lowerCAmelCase : str = output_stride
_lowerCAmelCase : Any = first_layer_is_expansion
_lowerCAmelCase : int = finegrained_output
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = tf_padding
_lowerCAmelCase : Optional[int] = classifier_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = semantic_loss_ignore_index
class UpperCamelCase__ ( OnnxConfig ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
return 1E-4
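# A minimal export sketch built on the OnnxConfig above (model id and output
# path are illustrative; this mirrors the standard transformers.onnx CLI):
#   python -m transformers.onnx --model=google/mobilenet_v2_1.0_224 onnx/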
| 25 | 1 |
'''simple docstring'''
def solution (pence = 200 ):
"""simple docstring"""
coins = [1, 2, 5, 10, 20, 50, 100, 200]
number_of_ways = [0] * (pence + 1)
number_of_ways[0] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(coin , pence + 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
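# Worked example: with coins {1, 2, 5} there are 4 ways to make 5 pence
# ({5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}); iterating coins in the outer loop
# counts each combination exactly once, regardless of order.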
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 25 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Optional[int] = 'pt'
_lowerCAmelCase : Tuple = 'tf'
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ )
model_tf.save_pretrained(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ )
def a ( self ):
'''simple docstring'''
# TensorFlow not in environment -> use PyTorch
mock_tf_available = MagicMock(return_value=False )
with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
mock_torch_available = MagicMock(return_value=False )
with patch('transformers.onnx.features.is_torch_available' , mock_torch_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_tf )
# Both in environment -> use PyTorch
mock_tf_available = MagicMock(return_value=True )
mock_torch_available = MagicMock(return_value=True )
with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
'transformers.onnx.features.is_torch_available' , mock_torch_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_pt )
# Both not in environment -> raise error
mock_tf_available = MagicMock(return_value=False )
mock_torch_available = MagicMock(return_value=False )
with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
'transformers.onnx.features.is_torch_available' , mock_torch_available ):
with self.assertRaises(EnvironmentError ):
framework = FeaturesManager.determine_framework(self.test_model )
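# Resolution order exercised above: an explicit framework argument wins, then
# the format of a local checkpoint decides, then the installed backend, with
# PyTorch preferred when both PyTorch and TensorFlow are available.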
| 25 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MvpTokenizer
__magic_name__ = MvpTokenizerFast
__magic_name__ = True
__magic_name__ = filter_roberta_detectors
def a ( self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : int = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_lowerCAmelCase : Dict = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
_lowerCAmelCase : Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowerCAmelCase : int = {'unk_token': '<unk>'}
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def a ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def a ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCAmelCase : Optional[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Tuple = tokenizer(snake_case__ , max_length=len(snake_case__ ) , padding=snake_case__ , return_tensors='pt' )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_lowerCAmelCase : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case__ , snake_case__ )
# Test that special tokens are reset
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : int = tokenizer(snake_case__ , padding=snake_case__ , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , snake_case__ )
self.assertIn('attention_mask' , snake_case__ )
self.assertNotIn('labels' , snake_case__ )
self.assertNotIn('decoder_attention_mask' , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Tuple = tokenizer(text_target=snake_case__ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def a ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Optional[Any] = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
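# With truncation enabled, the oversized first input is clipped to the
# model's 1024-token maximum, the sequence length asserted above.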
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ['A long paragraph for summarization.']
_lowerCAmelCase : List[Any] = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase : Optional[Any] = tokenizer(snake_case__ , text_target=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : int = inputs['input_ids']
_lowerCAmelCase : str = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[int] = 'A, <mask> AllenNLP sentence.'
_lowerCAmelCase : Optional[int] = tokenizer_r.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
_lowerCAmelCase : Union[str, Any] = tokenizer_p.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_lowerCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowerCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 25 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seq2seq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
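# Minimal usage sketch (model id and text are illustrative; the flow matches
# the src_lang/tgt_lang handling implemented above; upstream the class is
# named NllbTokenizerFast):
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   batch = tok("Hello world", return_tensors="pt")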
| 25 | 1 |
'''simple docstring'''
import requests
lowerCAmelCase : Optional[Any] = """YOUR API KEY"""
def get_gifs (query , api_key = giphy_api_key ):
"""simple docstring"""
formatted_query = '+'.join(query.split() )
url = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
gifs = requests.get(url ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 25 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present (transformers_path ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
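# Typical invocations (the script path is illustrative):
#   python check_build.py              # checks build/lib/transformers
#   python check_build.py --check_lib  # checks the installed package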
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase : Dict = parser.parse_args()
if args.check_lib:
lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
lowerCAmelCase : int = Path(transformers_module.__file__).parent
else:
lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase = pytest.mark.integration
@require_faiss
class UpperCamelCase__ ( TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def a ( self ):
'''simple docstring'''
import faiss
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
_lowerCAmelCase : Dict = dset.map(
lambda snake_case__ , snake_case__ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__a , keep_in_memory=__a )
_lowerCAmelCase : List[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
_lowerCAmelCase : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def a ( self ):
'''simple docstring'''
import faiss
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_lowerCAmelCase : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def a ( self ):
'''simple docstring'''
import faiss
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
_lowerCAmelCase : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(__a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def a ( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
_lowerCAmelCase : Any = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
_lowerCAmelCase : Dict = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
_lowerCAmelCase : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=__a )
_lowerCAmelCase : str = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class UpperCamelCase__ ( TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
import faiss
_lowerCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_lowerCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : List[Any] = index.search(__a )
self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_lowerCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
_lowerCAmelCase : Dict = index.search_batch(__a )
self.assertRaises(__a , index.search_batch , queries[0] )
_lowerCAmelCase : Any = [scores[0] for scores in total_scores]
_lowerCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __a )
def a ( self ):
'''simple docstring'''
import faiss
_lowerCAmelCase : int = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_lowerCAmelCase : List[str] = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__a ):
_lowerCAmelCase : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def a ( self ):
'''simple docstring'''
import faiss
_lowerCAmelCase : Tuple = faiss.IndexFlat(5 )
_lowerCAmelCase : List[Any] = FaissIndex(custom_index=__a )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def a ( self ):
'''simple docstring'''
import faiss
_lowerCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
index.save(tmp_file.name )
_lowerCAmelCase : List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_lowerCAmelCase : List[Any] = np.zeros(5 , dtype=np.floataa )
_lowerCAmelCase : Any = 1
_lowerCAmelCase : int = index.search(__a )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowercase (_A ):
"""simple docstring"""
import faiss
_lowerCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_lowerCAmelCase : Dict = 'index.faiss'
_lowerCAmelCase : Any = f'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
_lowerCAmelCase : Any = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
_lowerCAmelCase : Any = np.zeros(5 , dtype=np.floataa )
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Tuple = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class UpperCamelCase__ ( TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
_lowerCAmelCase : int = Elasticsearch()
_lowerCAmelCase : Dict = {'acknowledged': True}
_lowerCAmelCase : List[Any] = ElasticSearchIndex(es_client=__a )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
_lowerCAmelCase : Optional[Any] = 'foo'
_lowerCAmelCase : int = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
_lowerCAmelCase : List[Any] = index.search(__a )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_lowerCAmelCase : Dict = 'foo'
_lowerCAmelCase : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
_lowerCAmelCase : Optional[Any] = index.search(__a , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_lowerCAmelCase : List[Any] = ['foo', 'bar', 'foobar']
_lowerCAmelCase : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
_lowerCAmelCase : Any = index.search_batch(__a )
_lowerCAmelCase : Any = [scores[0] for scores in total_scores]
_lowerCAmelCase : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
# batched queries with timeout
_lowerCAmelCase : Tuple = ['foo', 'bar', 'foobar']
_lowerCAmelCase : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
_lowerCAmelCase : int = index.search_batch(__a , request_timeout=30 )
_lowerCAmelCase : Any = [scores[0] for scores in total_scores]
_lowerCAmelCase : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
| 350 |
'''simple docstring'''
def lowercase (input_string ):
"""simple docstring"""
max_length = 0
# if input_string is "aba" then new_input_string becomes "a|b|a"
new_input_string = ''
output_string = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(input_string ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the palindromic substring found so far
# that extends furthest to the right
l , r = 0, 0 # noqa: E741
# length[i] shows the length of palindromic substring with center i
length = [1 for i in range(len(new_input_string ) )]
# for each character in new_string find corresponding palindromic string
start = 0
for j in range(len(new_input_string ) ):
k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(new_input_string )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
length[j] = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update l and r to span this new palindrome
if j + k - 1 > r:
l = j - k + 1 # noqa: E741
r = j + k - 1
# update max_length and start position
if max_length < length[j]:
max_length = length[j]
start = j
# create that string
s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
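# Worked example: "aba" becomes "a|b|a"; the mirror trick seeds each center's
# expansion from min(length[l + r - j] // 2, r - j + 1) instead of 1, which
# is what makes the whole scan O(n) overall (Manacher's algorithm).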
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase__ ( OwlViTImageProcessor ):
"""simple docstring"""
def __init__( self , *args , **kwargs ):
'''simple docstring'''
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' , FutureWarning , )
super().__init__(*args , **kwargs )
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
"""simple docstring"""
a : int = 0
b : bool = False
c : float = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : List[Any] = """"""
lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
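
# --- Added sketch (illustrative): the pattern under test. A `KwargsHandler`
# dataclass forwards only the fields that differ from their defaults, which is
# exactly what the MockClass assertions above verify.
if __name__ == "__main__":
    handler = DistributedDataParallelKwargs(find_unused_parameters=True)
    assert handler.to_kwargs() == {"find_unused_parameters": True}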
| 25 | 0 |
'''Scrape the current worldwide COVID-19 statistics from worldometers.info.'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict mapping each headline statistic to its displayed value."""
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f'{key}\n{value}\n')
| 352 |
'''TrajectoryTransformer model configuration.'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
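
# --- Added usage sketch (illustrative; the directory name is a placeholder).
# Configs subclassing `PretrainedConfig` round-trip through JSON:
#     config = TrajectoryTransformerConfig(n_layer=2)
#     config.save_pretrained("./ttm-config")   # writes config.json
#     assert TrajectoryTransformerConfig.from_pretrained("./ttm-config").n_layer == 2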
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'bert-base-cased'
_lowerCAmelCase : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
_lowerCAmelCase : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def a ( self ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
_lowerCAmelCase : Optional[int] = TFAutoModelForCausalLM.from_pretrained(__snake_case )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def a ( self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
_lowerCAmelCase : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def a ( self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
_lowerCAmelCase : List[str] = TFAutoModelForMaskedLM.from_pretrained(__snake_case )
_lowerCAmelCase , _lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def a ( self ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
@slow
def a ( self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
_lowerCAmelCase : Dict = TFAutoModelForSequenceClassification.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def a ( self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
_lowerCAmelCase : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_tensorflow_probability
def a ( self ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_lowerCAmelCase : Dict = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
_lowerCAmelCase : Any = TFAutoModelForTableQuestionAnswering.from_pretrained(__snake_case )
_lowerCAmelCase , _lowerCAmelCase : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(
__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(__snake_case , __snake_case )
_lowerCAmelCase : Any = copy.deepcopy(model.config )
_lowerCAmelCase : List[Any] = ['FunnelBaseModel']
_lowerCAmelCase : Any = TFAutoModel.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
_lowerCAmelCase : List[Any] = TFAutoModel.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def a ( self ):
'''simple docstring'''
try:
            AutoConfig.register('new-model', NewModelConfig)
_lowerCAmelCase : Optional[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)
# Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'
        ):
            _ = TFAutoModel.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin',
        ):
            _ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
            _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
_lowerCAmelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
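
# --- Added sketch (illustrative) of the registration pattern exercised above:
# once a config type and model class are registered, the Auto classes resolve
# them like any built-in architecture.
#     AutoConfig.register("new-model", NewModelConfig)
#     TFAutoModel.register(NewModelConfig, TFNewModel)
#     model = TFAutoModel.from_config(NewModelConfig())   # -> TFNewModel instance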
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'],
        )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO'
        )
        cls.pad_token_id = 1
        return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
    def test_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
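
# --- Added usage sketch (illustrative): mBART-50 formats source text as
# `[src_lang_code] X [eos]`, which is why the tests above expect EN_CODE
# (250004) first and 2 (EOS) last.
#     tok = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     ids = tok("A test").input_ids   # [250004, ..., 2]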
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
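
# --- Added note (editor): `_LazyModule` defers the heavy `modeling_mega`
# import until an attribute is first accessed, so the imports under
# TYPE_CHECKING above exist purely for static analysis. Eager equivalent:
#     from transformers.models.mega.modeling_mega import MegaModel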
| 354 |
'''
Count the primes below one million that can be written as the difference of
two consecutive cubes: (n + 1)**3 - n**3 = 3*n*n + 3*n + 1.
'''
from math import isqrt


def is_prime(number: int) -> bool:
    """
    Naive primality test by trial division up to the square root.

    >>> is_prime(97)
    True
    >>> is_prime(91)
    False
    """
    if number < 2:
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    >>> solution(100)
    4
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
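
# --- Added sanity check (illustrative): every candidate tested above is a
# difference of consecutive cubes, (n + 1)**3 - n**3 == 3*n*n + 3*n + 1.
if __name__ == "__main__":
    for n in range(1, 5):
        assert (n + 1) ** 3 - n**3 == 3 * n * n + 3 * n + 1  # 7, 19, 37, 61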
| 25 | 0 |
'''Calculate the n-th Proth number: numbers of the form k * 2**m + 1 with odd k < 2**m.'''
import math


def proth(number: int) -> int:
    """
    >>> proth(1)
    3
    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        message = f'Input value of [number={number}] must be an integer'
        raise TypeError(message)
    if number < 1:
        message = f'Input value of [number={number}] must be > 0'
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +1 for binary starting at 0 (2**0, 2**1, ...), and +1 to start the
        # sequence at the 3rd Proth number
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
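
# --- Added check (illustrative): the first Proth numbers are well known.
if __name__ == "__main__":
    assert [proth(i) for i in range(1, 9)] == [3, 5, 9, 13, 17, 25, 33, 41]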
| 355 |
'''MVP model configuration.'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.'
            )
| 25 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
_lowerCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
_lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase : Optional[int] = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
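
# --- Added usage sketch (illustrative; `img` stands for any PIL image) ---
#     image_processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#     pixel_values = image_processor(images=img, return_tensors="pt").pixel_values
#     # -> shape (1, 3, 18, 18): resize to shortest edge 18, then an 18x18 center crop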
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, 'r') as f:
        return json.load(f)


def write_json(text, path):
    with open(path, 'w') as f:
        json.dump(text, f)
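
# --- Added worked example: with the defaults (ffn_dim_multiplier=1,
# multiple_of=256) and the 7B hidden size of 4096,
#     int(8 * 4096 / 3) = 10922, rounded up to a multiple of 256 -> 11008,
# which matches INTERMEDIATE_SIZE_MAP["7B"] above.
#     assert compute_intermediate_size(4096) == 11008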
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, 'tmp')
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, 'params.json'))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['n_layers']
    n_heads = params['n_heads']
    n_heads_per_shard = n_heads // num_shards
    dim = params['dim']
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['n_kv_heads']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.')
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir',
        help='Location of LLaMA weights, which contains tokenizer.model and model folders',
    )
    parser.add_argument(
        '--model_size',
        choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'],
    )
    parser.add_argument(
        '--output_dir',
        help='Location to write HF model and tokenizer',
    )
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
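
# --- Added example invocation (paths are placeholders; the script file name
# follows the usual Transformers convention for this converter):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /path/to/output/llama-7b-hf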
| 25 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase : List[str] = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_snake_case , tokenizer=_snake_case )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase : int = None
_lowerCAmelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_snake_case , _snake_case , _snake_case , _snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_snake_case , _snake_case , _snake_case , _snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase : Any = IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_snake_case , _snake_case , _snake_case , _snake_case )
    def _test_if( self , pipe_a , pipe_b , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_a(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_b(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_imgaimg( self , pipe_a , pipe_b , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_a(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_b(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_inpainting( self , pipe_a , pipe_b , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(torch_device )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_a(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(torch_device )
        output = pipe_b(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )
def lowercase ():
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
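# A minimal sketch of the same reset-then-measure pattern as a context manager,
# so each pipeline stage can be profiled in isolation (hypothetical helper, not
# part of the diffusers test suite above):
class _CudaPeakMemory:
    """Record peak CUDA memory allocated inside a `with` block."""
    def __enter__(self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        return self
    def __exit__(self , *exc ):
        self.peak_bytes = torch.cuda.max_memory_allocated()
        return False
# usage sketch: `with _CudaPeakMemory() as mem: pipe(...)` then check `mem.peak_bytes`.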
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
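# A small self-contained demo of the deepcopy-based clone pattern above
# (the `_DemoConfig` name and its field are illustrative only):
@dataclass
class _DemoConfig:
    tags: list = None
    def clone(self ):
        # Rebuild the instance from deep copies of every stored field.
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
_a = _DemoConfig(tags=['x'] )
_b = _a.clone()
_b.tags.append('y' )
assert _a.tags == ['x']  # mutating the clone does not touch the original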
| 25 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 358 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase__ ( BaseImageProcessor ):
    """simple docstring"""
    __magic_name__ = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
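# Hypothetical usage of the processor defined above (assumes torch and PIL are
# installed; the output shape follows from the default shortest-edge-256 resize
# plus the 224x224 center crop):
from PIL import Image as _PILImage
_processor = UpperCamelCase__()
_batch = _processor(images=_PILImage.new('RGB' , (400, 300) ) , return_tensors='pt' )
assert tuple(_batch['pixel_values'].shape ) == (1, 3, 224, 224)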
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 0 |
'''simple docstring'''
class RadixNode :
    """simple docstring"""
    def __init__( self , prefix = "" , is_leaf = False ):
        '''simple docstring'''
        self.nodes : dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word ):
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self , word ):
        '''simple docstring'''
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height = 0 ):
'''simple docstring'''
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie ():
    """simple docstring"""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('bandanas' )
    assert not root.find('apps' )
    root.delete('all' )
    assert not root.find('all' )
    root.delete('banana' )
    assert not root.find('banana' )
    assert root.find('bananas' )
    return True
def pytests ():
    """simple docstring"""
    assert test_trie()
def main ():
    """simple docstring"""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words )
    print('Words:' , words )
    print('Tree:' )
    root.print_tree()
if __name__ == "__main__":
main()
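# Quick illustration of the prefix-splitting primitive the tree is built on:
# `match` returns (common prefix, leftover node prefix, leftover word).
_node = RadixNode(prefix='banana' )
assert _node.match('bandana' ) == ('ban', 'ana', 'dana')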
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
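# Hypothetical quick check of the derived attributes above: with the default
# embed_dim=64 and four stages, the final channel dimension is 64 * 2**3 == 512.
_cfg = UpperCamelCase__()
assert _cfg.hidden_size == 512
assert _cfg.stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']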
| 25 | 0 |
lowerCAmelCase : Optional[int] = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
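# Layout check for the two helpers above (pure Python, placeholder ids with
# bos=0 and eos/sep=2; no tokenizer download needed):
# single sequence -> <s> A </s>; pair -> <s> A </s></s> B </s>, token type ids all 0.
_a_ids , _b_ids = [10, 11], [20]
_single = [0] + _a_ids + [2]
_pair = _single + [2] + _b_ids + [2]
assert len(_pair ) == len(_a_ids ) + len(_b_ids ) + 4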
| 25 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = StableDiffusionInpaintPipeline
__magic_name__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__magic_name__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__magic_name__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__magic_name__ = frozenset([] )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
return inputs
def a ( self ):
'''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9E-3
def a ( self ):
'''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
def a ( self ):
'''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=pndm , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 362 |
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort (sequence ):
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low] , sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid] , sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
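# One-pass, in-place three-way partition demo (no comparison sort involved):
assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1] ) == [0, 0, 1, 1, 2, 2]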
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : Optional[Any] = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ["""ConditionalDetrFeatureExtractor"""]
lowerCAmelCase : Optional[Any] = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
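# A minimal sketch of the deferral trick `_LazyModule` relies on: resolve the
# submodule with importlib only when an attribute is first requested (the helper
# name below is hypothetical, not part of transformers):
import importlib
def _lazy_get (attr_name , submodule , package=__name__ ):
    # Imports `package.submodule` on demand and pulls `attr_name` from it.
    return getattr(importlib.import_module(submodule , package ) , attr_name )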
| 363 |
'''simple docstring'''
def solution ():
    """simple docstring"""
    days_per_month = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    day = 6
    month = 1
    year = 1_9_0_1
    sundays = 0
    while year < 2_0_0_1:
        day += 7
        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                day = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 1_2:
            year += 1
            month = 1
        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
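# Independent cross-check with the standard library; the hand-rolled calendar
# walk above should agree with this count (the widely cited answer is 171):
import datetime
def _sundays_via_datetime ():
    return sum(
        1
        for year in range(1_9_0_1 , 2_0_0_1 )
        for month in range(1 , 13 )
        if datetime.date(year , month , 1 ).weekday() == 6  # Monday == 0, so 6 is Sunday
    )
assert solution() == _sundays_via_datetime()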
| 25 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase__ ( BeitImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 364 |
'''simple docstring'''
def solution (limit = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
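# Brute-force cross-check of the sieve-based totient sum for a tiny limit:
# counting reduced proper fractions n/d with d <= 8 directly gives 21.
from math import gcd
assert solution(8 ) == sum(
    1 for d in range(2 , 9 ) for n in range(1 , d ) if gcd(n , d ) == 1
) == 21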
| 25 | 0 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase : Optional[int] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results (test_results ):
    """simple docstring"""
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
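# Quick illustration of the pytest summary format this parser expects
# (hypothetical input string):
assert handle_test_results("1 failed, 2 passed in 3.45s ===" ) == (1, 2, "3.45s")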
def extract_first_line_failure (failures_short_lines ):
    """simple docstring"""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
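# Sketch of the `failures_short` report format this parser walks
# (hypothetical content):
_example = "____ [doctest] transformers.foo.Bar ____\nExpected nothing, got something"
assert extract_first_line_failure(_example ) == {
    "transformers.foo.Bar": "Expected nothing, got something"
}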
class Message :
    """simple docstring"""
    def __init__( self , title , doc_test_results ):
        '''simple docstring'''
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time( self ):
        '''simple docstring'''
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header( self ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures( self ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures( self ):
        '''simple docstring'''
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload( self ):
        '''simple docstring'''
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out( ):
        '''simple docstring'''
        payload = [
            {
                'type': 'section',
                'text': {
                    'type': 'plain_text',
                    'text': 'There was an issue running the tests.',
                },
                'accessory': {
                    'type': 'button',
                    'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
                    'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]
        print('Sending the following payload' )
        print(json.dumps({'blocks': payload} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=payload , )
    def post( self ):
        '''simple docstring'''
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=text , )
    def get_reply_blocks( self , job_name , job_link , failures , text ):
        '''simple docstring'''
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value ) > 250 else value
            failures_text += F'*{key}*\n_{value}_\n\n'
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply( self ):
        '''simple docstring'''
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = F'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts['ts'] , )
                time.sleep(1 )
def get_job_links ():
    """simple docstring"""
    run_id = os.environ['GITHUB_RUN_ID']
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'&page={i + 2}' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
    return {}
def retrieve_artifact (name ):
    """simple docstring"""
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts ():
    """simple docstring"""
    class Artifact :
        """simple docstring"""
        def __init__( self , name ):
            '''simple docstring'''
            self.name = name
            self.paths = []
        def __str__( self ):
            '''simple docstring'''
            return self.name
        def add_path( self , path ):
            '''simple docstring'''
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("""*.py""", """API Examples"""),
            ("""*.md""", """MD Examples"""),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            """failed""": [],
            """failures""": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("""run_doctests""")
    artifact_path = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
    artifact = retrieve_artifact(artifact_path["""name"""])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["""stats"""])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + """, """
        all_failures = extract_first_line_failure(artifact["""failures_short"""])
        for line in artifact["summary_short"].split("""\n"""):
            if re.search("""FAILED""", line):
                line = line.replace("""FAILED """, """""")
                line = line.split()[0].replace("""\n""", """""")
                if "::" in line:
                    file_path, test = line.split("""::""")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else """N/A"""
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("""🤗 Results of the doc tests.""", doc_test_results)
    message.post()
    message.post_reply()
| 365 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = """src/transformers"""

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"""\[([^\]]+)\]""")
def get_indent (line ):
    """simple docstring"""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks (code , indent_level="" , start_prompt=None , end_prompt=None ):
    """simple docstring"""
    index = 0
    lines = code.split('\n' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['\n'.join(lines[:index] )]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
                current_block.append(lines[index] )
                blocks.append('\n'.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('\n'.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('\n'.join(lines[index:] ) )
    return blocks
def ignore_underscore (key ):
    """simple docstring"""

    def _inner(x ):
        return key(x ).lower().replace('_' , '' )

    return _inner
def sort_objects (objects , key=None ):
    """simple docstring"""

    def noop(x ):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]

    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
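# A minimal usage sketch (added for illustration, not part of the original script):
# sort_objects(["CONSTANT", "function", "MyClass"]) returns ["CONSTANT", "MyClass", "function"],
# i.e. constants first, then classes, then functions, each group sorted alphabetically.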
def sort_objects_in_import (import_statement ):
    """simple docstring"""

    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys )] ) + "]"

    lines = import_statement.split('\n' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
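# A minimal usage sketch (added for illustration, not part of the original script):
# sort_objects_in_import('_import_structure["models.albert"].extend(["B", "A"])')
# returns '_import_structure["models.albert"].extend(["A", "B"])'.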
def sort_imports (file , check_only=True ):
    """simple docstring"""
    with open(file , encoding='utf-8' ) as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n' )

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(block )
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )

    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.' )
            with open(file , 'w' , encoding='utf-8' ) as f:
                f.write('\n'.join(main_blocks ) )
def sort_imports_in_all_inits (check_only=True ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '__init__.py' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '__init__.py' )]
    if len(failures ) > 0:
        raise ValueError(f'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
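# Usage sketch (added for illustration): running `python utils/custom_init_isort.py --check_only`
# raises a ValueError reporting how many __init__.py files `make style` would rewrite.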
| 25 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "wavlm"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , num_buckets=320 , max_bucket_distance=800 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
'''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
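# Example (added for illustration): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2),
# inputs_to_logits_ratio evaluates to 5 * 2**6 == 320, i.e. one output frame per 320 input samples.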
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32

    @property
    def block_out_channels_0( self ):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )

        model_kwargs = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        '''simple docstring'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0

        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )

        pipe.set_progress_bar_config(disable=None )

        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(F'image.shape {image.shape}' )

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""

    def tearDown( self ):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0

        prompt = 'a hat'

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )

        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()

        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image , expected_image )
| 25 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """simple docstring"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config( self ):
        '''simple docstring'''
        return DecisionTransformerConfig(
            batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
        '''simple docstring'''
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_forward_signature( self ):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]

            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_autoregressive_prediction( self ):
        '''simple docstring'''
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device )

        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )

        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )

            with torch.no_grad():
                _ , action_pred , _ = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )

            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            state , reward , _ , _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix (postfix_notation ):
    """simple docstring"""
    if not postfix_notation:
        return 0

    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
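    # A minimal usage sketch (added for illustration, not part of the original module):
    # "2 1 + 3 *" in postfix evaluates to (2 + 1) * 3 == 9.
    print(evaluate_postfix(['2', '1', '+', '3', '*']))  # 9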
| 25 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_re_flax_models = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split (identifier ):
    """simple docstring"""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
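# A minimal usage sketch (added for illustration, not part of the original script):
# camel_case_split("TFBertModel") returns ["TF", "Bert", "Model"], which is how the
# prefix-stripping loop in get_frameworks_table peels off one trailing word at a time.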
def get_frameworks_table ():
    """simple docstring"""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]

        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name )[:-1] )

    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()

    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'

    data['processor'] = [processors[t] for t in all_models]

    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table (table ):
    """simple docstring"""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']
        auto_classes = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )

    return table
def update_metadata (token , commit_sha ):
    """simple docstring"""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )

    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , 'frameworks.json' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , 'pipeline_tags.json' ) )

        if commit_sha is not None:
            commit_message = (
                f'Update with commit {commit_sha}\n\nSee: '
                f'https://github.com/huggingface/transformers/commit/{commit_sha}'
            )
        else:
            commit_message = 'Update'

        upload_folder(
            repo_id='huggingface/transformers-metadata' , folder_path=tmp_dir , repo_type='dataset' , token=token , commit_message=commit_message , )
def check_pipeline_tags ():
    """simple docstring"""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(model )

    if len(missing ) > 0:
        msg = ', '.join(missing )
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            f'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
    parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
    parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
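# Usage sketch (added for illustration):
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
# refreshes frameworks.json and pipeline_tags.json in the huggingface/transformers-metadata dataset.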
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( PretrainedConfig ):
    """simple docstring"""

    model_type = "mobilenet_v2"

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig( OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict([('pixel_values', {0: 'batch'})] )

    @property
    def outputs( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )

    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
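# A minimal usage sketch (added for illustration, not part of the original module):
# MobileNetV2OnnxConfig(MobileNetV2Config(), task='image-classification').outputs
# returns OrderedDict([('logits', {0: 'batch'})]).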
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 369 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest( TestCase ):
    """simple docstring"""

    def setUp( self ):
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = 'pt'
        self.framework_tf = 'tf'
    def _setup_pt_ckpt( self , save_dir ):
        '''simple docstring'''
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )

    def _setup_tf_ckpt( self , save_dir ):
        '''simple docstring'''
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )
    def test_framework_provided( self ):
        '''simple docstring'''
        mock_framework = 'mock_framework'

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ):
        '''simple docstring'''
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ):
        '''simple docstring'''
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 25 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components( self ):
        '''simple docstring'''
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )

        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )

        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }

        return inputs
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def test_save_load_optional_components( self ):
        '''simple docstring'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1 )

    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def test_save_load_local( self ):
        '''simple docstring'''
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/nllb-large-en-ro""": 1024,
    """facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens = []
    suffix_tokens = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        '''simple docstring'''
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang
@src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs):
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
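# Hedged usage sketch (comments only; class and constants as defined above): a typical
# translation setup would be
#   tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
# set_src_lang_special_tokens then prefixes every input with the eng_Latn code (or suffixes
# it in legacy mode), and _build_translation_inputs forces the fra_Latn id as the first
# generated token on the decoder side.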
| 25 | 0 |
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """simple docstring"""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    """simple docstring"""
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """simple docstring"""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """simple docstring"""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(""".""")
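# Illustrative output shape (hand-written, not generated): for a tree containing
# `maths/prime_check.py`, print_directory_md emits markdown along these lines:
#
#   ## Maths
#   * [Prime Check](maths/prime_check.py)
#
# md_prefix(0) -> "\n##" opens a new section; md_prefix(2) -> "    *" nests a bullet two levels deep.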
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    """simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
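# Typical invocations (paths assumed, for context): run `python utils/check_build.py` from the
# repo root after `python setup.py build`, or pass `--check_lib` to validate an installed package.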
| 25 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
        date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
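# Rough rendering sketch (made-up row) of the rule-free pipe table that hf_table_format
# defines, which is what Slack displays inside the ``` blocks assembled above:
#   tabulate([["test_modeling_foo.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=hf_table_format)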
| 350 |
'''simple docstring'''
def lowercase(input_string: str) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
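# Hand-checked sanity cases for the Manacher routine above (odd- and even-length palindromes):
assert lowercase("aba") == "aba"
assert lowercase("abba") == "abba"
assert lowercase("ababc") == "bab"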
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
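# Note on the pattern above: _LazyModule defers the heavy torch/tensorflow imports until an
# attribute is first accessed, while the try/except blocks prune unavailable backends out of
# `_import_structure`, so e.g. `from ...funnel import FunnelModel` only pays the torch import
# cost when it actually runs.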
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """simple docstring"""

    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    """simple docstring"""
    def test_kwargs_handler(self):
        '''simple docstring'''
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        '''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        '''simple docstring'''
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
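# Standalone sketch (not the accelerate API itself) of the `to_kwargs()` contract the first
# test exercises: only fields that differ from the dataclass defaults are emitted.
from dataclasses import fields


@dataclass
class _TinyHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        # Compare each field against a default-constructed instance and keep only the diffs.
        default = _TinyHandler()
        return {f.name: getattr(self, f.name) for f in fields(self) if getattr(self, f.name) != getattr(default, f.name)}


assert _TinyHandler().to_kwargs() == {}
assert _TinyHandler(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}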
| 25 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
TASK_GUIDES_PATH = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    """simple docstring"""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """simple docstring"""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(TASK_GUIDES_PATH, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(TASK_GUIDES_PATH, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
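# For context: each guide keeps its generated model list between the two HTML markers used
# above, and `make fix-copies` is assumed to invoke this script with --fix_and_overwrite so
# stale lists are rewritten in place rather than raising.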
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
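# Hedged usage sketch (needs the transformers runtime, so left as comments): the
# attribute_map above aliases the generic config names onto the GPT-style fields, e.g.
#   config = TrajectoryTransformerConfig()
#   config.hidden_size == config.n_embd == 128  # True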
| 25 | 0 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """simple docstring"""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
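# Deterministic spot checks for the helpers above; note Miller-Rabin never rejects a true
# prime, and with 5 random bases a composite slips through with probability at most 4**-5.
assert is_prime_low_num(97)       # in the low-primes table
assert not is_prime_low_num(100)  # caught by trial division
assert not is_prime_low_num(561)  # Carmichael number, but divisible by 3
assert rabin_miller(1009)         # prime, so always accepted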
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '<s>'
_lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1054 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    """simple docstring"""
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
_lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
| 25 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """simple docstring"""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    """simple docstring"""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
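# For context: leftover "--key value" pairs after parse_known_args are forwarded to the
# chosen command's factory, e.g. (hypothetical flags)
#   parse_unknown_args(["--num_proc", "8", "--cache_dir", "/tmp"]) == {"num_proc": "8", "cache_dir": "/tmp"}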
| 354 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))
def solution(max_prime: int = 10**6) -> int:
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
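# Hand-checked example: below 100 the candidates 3n^2 + 3n + 1 are 7, 19, 37, 61, 91 and only
# the first four are prime (91 = 7 * 13). The step size works because consecutive cube
# differences satisfy ((n + 1)^3 - n^3) - (n^3 - (n - 1)^3) = 6n.
assert solution(100) == 4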
| 25 | 0 |
'''simple docstring'''
def topological_sort(graph):
    """simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
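# With the FIFO queue above, Kahn's algorithm dequeues vertices in the order their in-degree
# reaches zero, so for this particular graph it prints [0, 1, 2, 3, 4, 5].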
| 355 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed.")
| 25 | 0 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """simple docstring"""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """simple docstring"""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
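# Worked example for remove_prefix: the header is Elias-gamma style, i.e. `counter` leading
# zeros followed by a (counter + 1)-bit payload, 2 * counter + 1 bits in total. For
# "00101" + data the counter is 2, so all five header bits are dropped and only data remains.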
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    """simple docstring"""
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    """simple docstring"""
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """simple docstring"""
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
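    # Why the permutation: Meta checkpoints interleave the rotary (real/imaginary) halves of
    # each attention head inside wq/wk, while the HF LLaMA code expects the two halves stored
    # contiguously; the view(..., 2, ...).transpose(1, 2) round-trip performs that reshuffle.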
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, so saving attention_norm and ffn_norm would save those other weights too, which is
# redundant as the other weights will be stitched together from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
    _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.float16 , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase : List[Any] = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
| 25 | 0 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self , *,
snake_case__ = 4 , snake_case__ = 768 , snake_case__ , snake_case__ , ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.zeros(__lowerCAmelCase ) )
# parameters for additional clip time embeddings
_lowerCAmelCase : List[str] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase : List[Any] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
# parameters for encoder hidden states
_lowerCAmelCase : List[str] = clip_extra_context_tokens
_lowerCAmelCase : str = nn.Linear(
__lowerCAmelCase , self.clip_extra_context_tokens * cross_attention_dim )
_lowerCAmelCase : Any = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase : int = nn.LayerNorm(__lowerCAmelCase )
def a ( self , *, snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_lowerCAmelCase : Tuple = image_embeddings.shape[0]
_lowerCAmelCase : Union[str, Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_lowerCAmelCase : Dict = classifier_free_guidance_embeddings.expand(
__lowerCAmelCase , -1 )
_lowerCAmelCase : Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_lowerCAmelCase : Union[str, Any] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_lowerCAmelCase : int = self.embedding_proj(__lowerCAmelCase )
_lowerCAmelCase : Tuple = self.clip_image_embeddings_project_to_time_embeddings(__lowerCAmelCase )
_lowerCAmelCase : Any = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_lowerCAmelCase : Dict = self.clip_extra_context_tokens_proj(__lowerCAmelCase )
_lowerCAmelCase : str = clip_extra_context_tokens.reshape(__lowerCAmelCase , -1 , self.clip_extra_context_tokens )
_lowerCAmelCase : Optional[int] = clip_extra_context_tokens.permute(0 , 2 , 1 )
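        # At this point the extra tokens have shape (batch_size, clip_extra_context_tokens, cross_attention_dim).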
_lowerCAmelCase : Tuple = self.encoder_hidden_states_proj(__lowerCAmelCase )
_lowerCAmelCase : Dict = self.text_encoder_hidden_states_norm(__lowerCAmelCase )
_lowerCAmelCase : List[str] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
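# Note: the method above returns a new instance whose fields are deep-copied, so mutating the clone never
# affects the original configuration object.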
| 25 | 0 |
'''simple docstring'''
import argparse
import json
import subprocess
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Union[str, Any] = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
_lowerCAmelCase : Optional[int] = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
_lowerCAmelCase : Any = output.stdout.decode('utf-8' )
_lowerCAmelCase : str = json.loads(_UpperCamelCase )
_lowerCAmelCase : Tuple = status['runners']
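    # The GitHub API response is expected to look like
    # {"total_count": N, "runners": [{"name": ..., "status": "online" | "offline", ...}, ...]};
    # only the "name" and "status" fields are used below.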
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
    # save the result so we can report the offline runners on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
_lowerCAmelCase : List[str] = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def lowercase (_A ):
"""simple docstring"""
return values.split(',' )
lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowerCAmelCase : str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 358 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
lowerCAmelCase : Dict = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def lowercase (_A=True ):
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
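# With with_config=True this yields entries whose testcase_name looks like "wikipedia/20220301.de",
# one per (dataset, config_name) pair listed in DATASETS_ON_HF_GCP above.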
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=SCREAMING_SNAKE_CASE_ ) )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = None
__magic_name__ = None
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
with TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Any = dataset_module_factory(__lowerCAmelCase , cache_dir=__lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = import_main_class(dataset_module.module_path , dataset=__lowerCAmelCase )
_lowerCAmelCase : Optional[Any] = builder_cls(
cache_dir=__lowerCAmelCase , config_name=__lowerCAmelCase , hash=dataset_module.hash , )
_lowerCAmelCase : Optional[int] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__lowerCAmelCase ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
_lowerCAmelCase : Optional[Any] = cached_path(__lowerCAmelCase , cache_dir=__lowerCAmelCase )
self.assertTrue(os.path.exists(__lowerCAmelCase ) )
@pytest.mark.integration
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
_lowerCAmelCase : Tuple = dataset_module_factory('wikipedia' , cache_dir=lowerCAmelCase__ )
_lowerCAmelCase : List[str] = import_main_class(dataset_module.module_path )
_lowerCAmelCase : Optional[Any] = builder_cls(
cache_dir=lowerCAmelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_lowerCAmelCase : Optional[Any] = None
builder_instance.download_and_prepare()
_lowerCAmelCase : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = dataset_module_factory('wikipedia' , cache_dir=lowerCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase__ )
_lowerCAmelCase : Tuple = builder_cls(
cache_dir=lowerCAmelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
_lowerCAmelCase : List[str] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert "train" in ds
assert isinstance(ds['train'] , lowerCAmelCase__ )
assert next(iter(ds['train'] ) )
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = '▁'
lowerCAmelCase : List[str] = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase : Optional[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
lowerCAmelCase : Optional[Any] = {
'xlm-roberta-base': 5_12,
'xlm-roberta-large': 5_12,
'xlm-roberta-large-finetuned-conll02-dutch': 5_12,
'xlm-roberta-large-finetuned-conll02-spanish': 5_12,
'xlm-roberta-large-finetuned-conll03-english': 5_12,
'xlm-roberta-large-finetuned-conll03-german': 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
_lowerCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
_lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
_lowerCAmelCase : Any = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase : List[str] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_lowerCAmelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.__dict__.copy()
_lowerCAmelCase : str = None
_lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCAmelCase : List[Any] = {}
_lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
_lowerCAmelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
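        # The resulting layout is the standard XLM-R format: `<s> A </s>` for a single sequence and
        # `<s> A </s></s> B </s>` for a pair.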
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a ( self , snake_case__ ):
'''simple docstring'''
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def a ( self , snake_case__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase : Tuple = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
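        # Example (per the alignment table in __init__): self.sp_model.PieceToId(',') == 3, so ',' maps to
        # 3 + fairseq_offset == 4; pieces unknown to the SP model return 0 and fall back to unk_token_id (3).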
def a ( self , snake_case__ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , 'wb' ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
| 25 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : int = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = ["""CLIPFeatureExtractor"""]
lowerCAmelCase : Union[str, Any] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 25 | 0 |
'''simple docstring'''
from math import sqrt
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = 0
for i in range(1 , int(sqrt(UpperCamelCase__ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCamelCase__ ):
total += i + n // i
elif i == sqrt(UpperCamelCase__ ):
total += i
return total - n
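# For reference: the classic amicable pair is 220 and 284, since sum_of_divisors(220) == 284 and
# sum_of_divisors(284) == 220; a perfect number such as 6 is excluded by the `!= i` check in `solution`.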
def lowercase (_A = 1_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = sum(
i
for i in range(1 , UpperCamelCase__ )
if sum_of_divisors(sum_of_divisors(UpperCamelCase__ ) ) == i and sum_of_divisors(UpperCamelCase__ ) != i )
return total
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 362 |
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag.
lowerCAmelCase : Optional[int] = 1 # The second color of the flag.
lowerCAmelCase : int = 2 # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)
def lowercase (_A ):
"""simple docstring"""
if not sequence:
return []
if len(_A ) == 1:
return list(_A )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = len(_A ) - 1
_lowerCAmelCase : Optional[Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
high -= 1
else:
            _lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contain only {colors} values'
raise ValueError(_A )
return sequence
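# Illustrative example: dutch_national_flag_sort([2, 0, 1, 0, 2]) returns [0, 0, 1, 2, 2].
# Note that doctest.testmod() below only runs doctests, and this module's docstrings contain none.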
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 25 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
    __magic_name__ = GPTSw3Tokenizer
__magic_name__ = False
__magic_name__ = True
__magic_name__ = False
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        _lowerCAmelCase : List[Any] = GPTSw3Tokenizer(_SCREAMING_SNAKE_CASE , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "This is a test"
_lowerCAmelCase : Optional[int] = "This is a test"
return input_text, output_text
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = "<s>"
_lowerCAmelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2000 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def a ( self ):
'''simple docstring'''
        _lowerCAmelCase : List[str] = GPTSw3Tokenizer(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [465, 287, 265, 631, 842] )
_lowerCAmelCase : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
_lowerCAmelCase : int = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
_lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
# fmt: off
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def a ( self ):
'''simple docstring'''
        _lowerCAmelCase : Dict = GPTSw3Tokenizer(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase : str = ["This is a test", "I was born in 92000, and this is falsé."]
_lowerCAmelCase : Tuple = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertListEqual(tokenizer.encode_fast(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Test that decode_fast returns the input text
for text, token_ids in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.decode_fast(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
_lowerCAmelCase : Union[str, Any] = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='AI-Sweden/gpt-sw3-126m' , sequences=_SCREAMING_SNAKE_CASE , )
| 363 |
'''simple docstring'''
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
_lowerCAmelCase : int = 6
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Optional[int] = 1_9_0_1
_lowerCAmelCase : Optional[Any] = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
_lowerCAmelCase : List[str] = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
if month > 1_2:
year += 1
_lowerCAmelCase : Optional[int] = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
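# For reference, this counts Sundays that fell on the first of the month during the twentieth century
# (1 Jan 1901 to 31 Dec 2000, Project Euler problem 19); the accepted answer is 171.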
if __name__ == "__main__":
print(solution())
| 25 | 0 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : Optional[Any] = 16
lowerCAmelCase : Optional[int] = 32
def lowercase (_A , _A = 1_6 ):
"""simple docstring"""
_lowerCAmelCase : Dict = AutoTokenizer.from_pretrained('bert-base-cased' )
_lowerCAmelCase : Union[str, Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(_A ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase : Any = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Optional[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase : int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase : Tuple = 1_6
elif accelerator.mixed_precision != "no":
_lowerCAmelCase : int = 8
else:
_lowerCAmelCase : Optional[Any] = None
return tokenizer.pad(
_UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
_lowerCAmelCase : List[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase : List[str] = mocked_dataloaders # noqa: F811
def lowercase (_A , _A ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _UpperCAmelCase ) == "1":
_lowerCAmelCase : Any = 2
# Initialize accelerator
_lowerCAmelCase : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : List[Any] = config["lr"]
_lowerCAmelCase : int = int(config['num_epochs'] )
_lowerCAmelCase : Optional[Any] = int(config['seed'] )
_lowerCAmelCase : List[str] = int(config['batch_size'] )
_lowerCAmelCase : List[Any] = evaluate.load('glue' , 'mrpc' )
# New Code #
    # We can now define an inner training loop function. It takes the batch size as its only parameter
    # and builds the dataloaders inside.
    # It also gets our decorator:
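    # (`find_executable_batch_size` retries the decorated function whenever it raises a CUDA out-of-memory
    # error, halving the batch size on each retry, starting from `starting_batch_size`.)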
@find_executable_batch_size(starting_batch_size=_UpperCAmelCase )
def inner_training_loop(_A ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase : Optional[Any] = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
_lowerCAmelCase : int = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate scheduler
_lowerCAmelCase : List[str] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase : Any = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCAmelCase : List[Any] = model(**_UpperCAmelCase )
_lowerCAmelCase : Dict = outputs.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : int = model(**_UpperCAmelCase )
_lowerCAmelCase : Tuple = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
_lowerCAmelCase : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , _UpperCAmelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_lowerCAmelCase : Dict = parser.parse_args()
_lowerCAmelCase : Tuple = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 364 |
'''simple docstring'''
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
_lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
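# The sieve above applies Euler's product formula phi(n) = n * prod(1 - 1/p) over the primes p dividing n;
# summing phi(n) for 2 <= n <= 1_000_000 counts the reduced proper fractions of Project Euler problem 72,
# whose accepted answer is 303963552391.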
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
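# Keys containing "*" are templates: the layer index extracted from the fairseq name is substituted for the
# wildcard, e.g. "self_attn.k_proj" inside "encoder.layers.3.self_attn.k_proj" maps to
# "encoder.layers.3.attention.k_proj" (see the wildcard handling in recursively_load_weights below).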
lowerCAmelCase : Optional[int] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowercase (_A , _A , _A , _A , _A ):
"""simple docstring"""
for attribute in key.split('.' ):
_lowerCAmelCase : int = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if weight_type is not None:
_lowerCAmelCase : Dict = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).shape
else:
_lowerCAmelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "weight_v":
_lowerCAmelCase : Dict = value
elif weight_type == "bias":
_lowerCAmelCase : List[Any] = value
else:
_lowerCAmelCase : List[str] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Any = fairseq_model.state_dict()
_lowerCAmelCase : Dict = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , hf_model.config.feat_extract_norm == 'group' , )
_lowerCAmelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_lowerCAmelCase : int = True
if "*" in mapped_key:
_lowerCAmelCase : Optional[int] = name.split(SCREAMING_SNAKE_CASE_ )[0].split('.' )[-2]
_lowerCAmelCase : Union[str, Any] = mapped_key.replace('*' , SCREAMING_SNAKE_CASE_ )
if "weight_g" in name:
_lowerCAmelCase : Dict = 'weight_g'
elif "weight_v" in name:
_lowerCAmelCase : Tuple = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
_lowerCAmelCase : List[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase : int = 'weight'
else:
_lowerCAmelCase : Any = None
set_recursively(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
logger.warning(f'Unused weights: {unused_weights}' )
def lowercase (_A , _A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = full_name.split('conv_layers.' )[-1]
_lowerCAmelCase : Any = name.split('.' )
_lowerCAmelCase : Union[str, Any] = int(items[0] )
_lowerCAmelCase : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCAmelCase : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCAmelCase : str = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCAmelCase : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCAmelCase : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def lowercase (_A , _A , _A=None ):
"""simple docstring"""
_lowerCAmelCase : Dict = torch.load(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase : Optional[Any] = WavLMConfigOrig(checkpoint['cfg'] )
_lowerCAmelCase : Optional[Any] = WavLMOrig(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
_lowerCAmelCase : Tuple = WavLMConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
_lowerCAmelCase : Any = WavLMConfig()
_lowerCAmelCase : int = WavLMModel(SCREAMING_SNAKE_CASE_ )
recursively_load_weights(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_wavlm.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCAmelCase : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 365 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
def noop(_A ):
return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def lowercase (_A ):
"""simple docstring"""
def _replace(_A ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
_lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_A ) )
def lowercase (_A=True ):
"""simple docstring"""
_lowerCAmelCase : int = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 25 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class UpperCamelCase__ ( UpperCamelCase__ ):
"""simple docstring"""
__magic_name__ = "lilt"
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=0 , snake_case__="absolute" , snake_case__=None , snake_case__=4 , snake_case__=1024 , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : Optional[Any] = position_embedding_type
_lowerCAmelCase : Optional[int] = classifier_dropout
_lowerCAmelCase : List[str] = channel_shrink_ratio
_lowerCAmelCase : Tuple = max_ad_position_embeddings
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) )
# create mask
_lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa )
_lowerCAmelCase : Dict = 0
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowercase (_A , _A , _A , _A ): # noqa: E741
"""simple docstring"""
while r - l > 1:
_lowerCAmelCase : int = (l + r) // 2
if v[m] >= key:
_lowerCAmelCase : Tuple = m
else:
_lowerCAmelCase : Tuple = m # noqa: E741
return r
def lowercase (_A ):
"""simple docstring"""
if len(__lowerCamelCase ) == 0:
return 0
_lowerCAmelCase : Tuple = [0] * len(__lowerCamelCase )
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : List[Any] = v[0]
for i in range(1 , len(__lowerCamelCase ) ):
if v[i] < tail[0]:
_lowerCAmelCase : List[str] = v[i]
elif v[i] > tail[length - 1]:
_lowerCAmelCase : Union[str, Any] = v[i]
length += 1
else:
_lowerCAmelCase : int = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowercase (_A ):
"""simple docstring"""
if not postfix_notation:
return 0
_lowerCAmelCase : int = {'+', '-', '*', '/'}
_lowerCAmelCase : list[Any] = []
for token in postfix_notation:
if token in operations:
_lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_A ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
raise ValueError('check_bouncy() accepts only integer arguments' )
_lowerCAmelCase : Optional[int] = str(_snake_case )
_lowerCAmelCase : Tuple = "".join(sorted(_snake_case ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def lowercase (_A = 9_9 ):
"""simple docstring"""
if not 0 < percent < 1_0_0:
raise ValueError('solution() only accepts values from 0 to 100' )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Union[str, Any] = 1
while True:
if check_bouncy(_snake_case ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mobilenet_v2"
def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : List[Any] = depth_multiplier
_lowerCAmelCase : List[Any] = depth_divisible_by
_lowerCAmelCase : Optional[Any] = min_depth
_lowerCAmelCase : str = expand_ratio
_lowerCAmelCase : str = output_stride
_lowerCAmelCase : Any = first_layer_is_expansion
_lowerCAmelCase : int = finegrained_output
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = tf_padding
_lowerCAmelCase : Optional[int] = classifier_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = semantic_loss_ignore_index
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
return 1E-4
| 25 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( _a ):
"""simple docstring"""
__magic_name__ = "deformable_detr"
__magic_name__ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1024 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.25 , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["resnet"](out_features=['stage4'] )
elif isinstance(_a , _a ):
_lowerCAmelCase : Dict = backbone_config.get('model_type' )
_lowerCAmelCase : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : int = config_class.from_dict(_a )
_lowerCAmelCase : str = use_timm_backbone
_lowerCAmelCase : List[str] = backbone_config
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : Optional[int] = num_queries
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = d_model
_lowerCAmelCase : str = encoder_ffn_dim
_lowerCAmelCase : List[str] = encoder_layers
_lowerCAmelCase : List[Any] = encoder_attention_heads
_lowerCAmelCase : Dict = decoder_ffn_dim
_lowerCAmelCase : List[Any] = decoder_layers
_lowerCAmelCase : List[str] = decoder_attention_heads
_lowerCAmelCase : Tuple = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Union[str, Any] = activation_function
_lowerCAmelCase : List[str] = init_std
_lowerCAmelCase : str = init_xavier_std
_lowerCAmelCase : Union[str, Any] = encoder_layerdrop
_lowerCAmelCase : Optional[Any] = auxiliary_loss
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Union[str, Any] = backbone
_lowerCAmelCase : int = use_pretrained_backbone
_lowerCAmelCase : Optional[int] = dilation
# deformable attributes
_lowerCAmelCase : Dict = num_feature_levels
_lowerCAmelCase : int = encoder_n_points
_lowerCAmelCase : Optional[int] = decoder_n_points
_lowerCAmelCase : str = two_stage
_lowerCAmelCase : Tuple = two_stage_num_proposals
_lowerCAmelCase : Optional[Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
_lowerCAmelCase : str = class_cost
_lowerCAmelCase : int = bbox_cost
_lowerCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_lowerCAmelCase : int = mask_loss_coefficient
_lowerCAmelCase : Union[str, Any] = dice_loss_coefficient
_lowerCAmelCase : List[str] = bbox_loss_coefficient
_lowerCAmelCase : Tuple = giou_loss_coefficient
_lowerCAmelCase : List[str] = eos_coefficient
_lowerCAmelCase : Tuple = focal_alpha
_lowerCAmelCase : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=_a , **_a )
@property
def a ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def a ( self ):
'''simple docstring'''
return self.d_model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_lowerCAmelCase : Any = self.backbone_config.to_dict()
_lowerCAmelCase : List[Any] = self.__class__.model_type
return output
| 369 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Optional[int] = 'pt'
_lowerCAmelCase : Tuple = 'tf'
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ )
model_tf.save_pretrained(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : Any = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase : int = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase : str = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowercase (_A ):
"""simple docstring"""
if not nums:
return 0
_lowerCAmelCase : List[str] = nums[0]
_lowerCAmelCase : Tuple = 0
for num in nums[1:]:
_lowerCAmelCase : Union[str, Any] = (
max_excluding + num,
max(_lowerCAmelCase , _lowerCAmelCase ),
)
return max(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 25 | 0 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(__lowerCAmelCase ) == 0:
raise ValueError('Input list must be a non empty list' )
if len(__lowerCAmelCase ) == 1:
return True
_lowerCAmelCase : Any = series[1] - series[0]
for index in range(len(__lowerCAmelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def lowercase (_A ):
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(__lowerCAmelCase ) == 0:
raise ValueError('Input list must be a non empty list' )
_lowerCAmelCase : Any = 0
for val in series:
answer += val
return answer / len(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def lowercase (_A ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase : Dict = parser.parse_args()
if args.check_lib:
lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
lowerCAmelCase : int = Path(transformers_module.__file__).parent
else:
lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def lowercase (_A = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
_lowerCAmelCase : Optional[int] = nums[0]
for i in range(1 , len(_UpperCAmelCase ) ):
_lowerCAmelCase : Dict = nums[i]
_lowerCAmelCase : Optional[Any] = max(_UpperCAmelCase , ans + num , _UpperCAmelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCAmelCase = int(input("""Enter number of elements : """).strip())
lowerCAmelCase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 350 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_lowerCAmelCase : List[str] = ''
_lowerCAmelCase : Any = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(_A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )]
# for each character in new_string find corresponding palindromic string
_lowerCAmelCase : Any = 0
for j in range(len(_A ) ):
_lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_A )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase : List[str] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741
_lowerCAmelCase : int = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase : Dict = length[j]
_lowerCAmelCase : Optional[int] = j
# create that string
_lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
for i in range(0 , _A ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowercase (_A ):
"""simple docstring"""
for i in range(_A , 0 , -1 ):
for _ in range(_A , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowercase (_A ):
"""simple docstring"""
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(_A ) # upper half
reverse_floyd(_A ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase : List[str] = 1
while K:
lowerCAmelCase : Dict = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase : Optional[int] = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = False
__magic_name__ = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : List[Any] = """"""
lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 25 | 0 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_lowerCAmelCase : str = f'Input value of [number={number}] must be an integer'
raise TypeError(lowerCamelCase__ )
if number < 1:
_lowerCAmelCase : Any = f'Input value of [number={number}] must be > 0'
raise ValueError(lowerCamelCase__ )
_lowerCAmelCase : Optional[Any] = 1
for i in range(1 , lowerCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = action_weight
_lowerCAmelCase : Optional[int] = reward_weight
_lowerCAmelCase : Union[str, Any] = value_weight
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = block_size
_lowerCAmelCase : List[Any] = action_dim
_lowerCAmelCase : List[Any] = observation_dim
_lowerCAmelCase : Union[str, Any] = transition_dim
_lowerCAmelCase : Tuple = learning_rate
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Tuple = n_embd
_lowerCAmelCase : Optional[Any] = embd_pdrop
_lowerCAmelCase : Union[str, Any] = attn_pdrop
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
_lowerCAmelCase : List[Any] = use_cache
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
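# Hedged instantiation sketch: `TrajectoryTransformerConfig` is the released
# name of the config class above (the architecture is deprecated in recent
# transformers versions, so the import may require an older release).
def _trajectory_config_sketch():
    from transformers import TrajectoryTransformerConfig

    config = TrajectoryTransformerConfig(n_layer=4, n_head=4, n_embd=128)
    assert config.hidden_size == 128  # resolved to n_embd via the attribute_map above
    return config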
| 25 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a ( self , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : str = np.random.RandomState(_lowerCAmelCase )
_lowerCAmelCase : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : List[str] = self.get_dummy_inputs()
_lowerCAmelCase : Dict = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Optional[int] = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : Any = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : str = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCAmelCase : List[Any] = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Dict = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs()
_lowerCAmelCase : int = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : List[str] = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : int = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCAmelCase : Optional[Any] = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : int = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : List[str] = 3 * [inputs['prompt']]
# forward
_lowerCAmelCase : List[str] = pipe(**_lowerCAmelCase )
_lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
_lowerCAmelCase : str = self.get_dummy_inputs()
_lowerCAmelCase : Optional[int] = 3 * [inputs.pop('prompt' )]
_lowerCAmelCase : str = pipe.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='np' , )
_lowerCAmelCase : Optional[int] = text_inputs['input_ids']
_lowerCAmelCase : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
_lowerCAmelCase : int = prompt_embeds
# forward
_lowerCAmelCase : List[Any] = pipe(**_lowerCAmelCase )
_lowerCAmelCase : Dict = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs()
_lowerCAmelCase : List[Any] = 3 * ['this is a negative prompt']
_lowerCAmelCase : Dict = negative_prompt
_lowerCAmelCase : Union[str, Any] = 3 * [inputs['prompt']]
# forward
_lowerCAmelCase : Dict = pipe(**_lowerCAmelCase )
_lowerCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : str = 3 * [inputs.pop('prompt' )]
_lowerCAmelCase : int = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase : Union[str, Any] = pipe.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='np' , )
_lowerCAmelCase : Optional[int] = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = embeds
# forward
_lowerCAmelCase : Tuple = pipe(**_lowerCAmelCase )
_lowerCAmelCase : Optional[int] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def a ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = ort.SessionOptions()
_lowerCAmelCase : Optional[int] = False
return options
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
_lowerCAmelCase : int = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np' )
_lowerCAmelCase : Dict = output.images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[str] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_lowerCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : List[Any] = 'open neural network exchange'
_lowerCAmelCase : Tuple = np.random.RandomState(0 )
_lowerCAmelCase : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type='np' )
_lowerCAmelCase : Union[str, Any] = output.images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : List[str] = 'open neural network exchange'
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0 )
_lowerCAmelCase : Optional[int] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type='np' )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 0
def test_callback_fn(snake_case__ , snake_case__ , snake_case__ ) -> None:
_lowerCAmelCase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase : Optional[int] = latents[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[Any] = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase : str = latents[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
_lowerCAmelCase : Any = False
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase : List[str] = 'Andromeda galaxy in a bottle'
_lowerCAmelCase : str = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
_lowerCAmelCase : int = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowerCAmelCase : List[Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
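# Minimal, hedged usage sketch for the pipeline under test, assuming the
# public diffusers + onnxruntime APIs; the tiny test checkpoint keeps it
# CPU-friendly.
def _onnx_sd_sketch():
    import numpy as np
    from diffusers import OnnxStableDiffusionPipeline

    pipe = OnnxStableDiffusionPipeline.from_pretrained(
        "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
        provider="CPUExecutionProvider",
    )
    generator = np.random.RandomState(0)  # ONNX pipelines take numpy RNGs, not torch generators
    image = pipe(
        "A painting of a squirrel eating a burger",
        generator=generator,
        num_inference_steps=2,
        guidance_scale=7.5,
        output_type="np",
    ).images[0]
    return image.shape  # (128, 128, 3) for this tiny checkpoint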
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase : Union[str, Any] = 25_00_04
lowerCAmelCase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MBartaaTokenizer
__magic_name__ = MBartaaTokenizerFast
__magic_name__ = True
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '<s>'
_lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1054 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "facebook/mbart-large-50-one-to-many-mmt"
__magic_name__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__magic_name__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def a ( cls ):
'''simple docstring'''
_lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
_lowerCAmelCase : Dict = 1
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
_lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
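# Hedged sketch of the translation workflow these tests cover, using the
# released MBart50Tokenizer API (the mangled `MBartaaTokenizer` above);
# `text_target` assumes a reasonably recent transformers release.
def _mbart50_sketch():
    from transformers import MBart50Tokenizer

    tok = MBart50Tokenizer.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tok(
        "UN Chief Says There Is No Military Solution in Syria",
        text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
        return_tensors="pt",
    )
    # input_ids start with the en_XX code and end with </s>; labels start with ro_RO.
    return batch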
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def lowercase (_A , _A ):
"""simple docstring"""
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = []
_lowerCAmelCase : Union[str, Any] = 1_1
_lowerCAmelCase : Optional[Any] = int('1' + '0' * digit_len )
for num in range(_A , _A ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(_A , _A ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
_lowerCAmelCase : Optional[int] = 1_0
return solutions
def lowercase (_A = 2 ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = 1.0
for fraction in fraction_list(_A ):
_lowerCAmelCase : Any = Fraction(_A )
result *= frac.denominator / frac.numerator
return int(_A )
if __name__ == "__main__":
print(solution())
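# Worked check (hedged): the four non-trivial two-digit digit-cancelling
# fractions are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100,
# so solution() returns 100 (the published Project Euler 33 answer). Using the
# names from the call sites inside this file:
#   >>> fraction_list(2)
#   ['16/64', '19/95', '26/65', '49/98']
#   >>> solution()
#   100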
| 354 |
'''simple docstring'''
from math import isqrt
def lowercase (_A ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) )
def lowercase (_A = 1_0**6 ):
"""simple docstring"""
_lowerCAmelCase : str = 0
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[str] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_A )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
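# Worked note (hedged): consecutive cubes differ by
# (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, and the loop above walks that sequence
# incrementally (each step adds 6 * cube_index): 7, 19, 37, 61, 91, ...
def _cube_difference_check():
    diffs = [(n + 1) ** 3 - n**3 for n in range(1, 6)]
    assert diffs == [7, 19, 37, 61, 91]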
| 25 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = tempfile.mkdtemp()
_lowerCAmelCase : int = BlipImageProcessor()
_lowerCAmelCase : Optional[Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
_lowerCAmelCase : Tuple = BlipaProcessor(snake_case__ , snake_case__ )
processor.save_pretrained(self.tmpdirname )
def a ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def a ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def a ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_lowerCAmelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
_lowerCAmelCase : str = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Tuple = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Tuple = self.prepare_image_inputs()
_lowerCAmelCase : Union[str, Any] = image_processor(snake_case__ , return_tensors='np' )
_lowerCAmelCase : Tuple = processor(images=snake_case__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : Any = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Optional[int] = 'lower newer'
_lowerCAmelCase : str = processor(text=snake_case__ )
_lowerCAmelCase : List[str] = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Dict = 'lower newer'
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Dict = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : Tuple = processor.batch_decode(snake_case__ )
_lowerCAmelCase : str = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : Dict = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
_lowerCAmelCase : Tuple = 'lower newer'
_lowerCAmelCase : int = self.prepare_image_inputs()
_lowerCAmelCase : str = processor(text=snake_case__ , images=snake_case__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
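# Hedged usage sketch for the processor under test (the mangled
# `BlipaProcessor` corresponds to Blip2Processor in released transformers);
# the checkpoint name is illustrative.
def _blip2_processor_sketch():
    import numpy as np
    from PIL import Image
    from transformers import Blip2Processor

    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    inputs = processor(text="lower newer", images=image, return_tensors="np")
    return sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']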
| 355 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mvp"
__magic_name__ = ["past_key_values"]
__magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[Any] = d_model
_lowerCAmelCase : Optional[int] = encoder_ffn_dim
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Any = decoder_ffn_dim
_lowerCAmelCase : Optional[Any] = decoder_layers
_lowerCAmelCase : int = decoder_attention_heads
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Any = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : Optional[int] = classifier_dropout
_lowerCAmelCase : List[Any] = use_cache
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Optional[Any] = use_prompt
_lowerCAmelCase : Optional[Any] = prompt_length
_lowerCAmelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ):
_lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
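# Hedged instantiation sketch through the released transformers API
# (`MvpConfig` is the public name of the class above):
def _mvp_config_sketch():
    from transformers import MvpConfig

    config = MvpConfig(use_prompt=True, prompt_length=100)
    assert config.hidden_size == config.d_model == 1024  # via the attribute_map above
    return config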
| 25 | 0 |
'''simple docstring'''
from math import ceil, sqrt
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : List[str] = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_lowerCAmelCase : str = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_lowerCAmelCase : Dict = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
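# Worked note (hedged): a square lamina with outer side a and centred square
# hole of side b (same parity, b >= 1) uses a**2 - b**2 tiles, so the loop
# above just counts admissible hole widths per outer_width. For
# limit = 1_000_000 the published Project Euler 173 answer is 1572729.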
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
lowerCAmelCase : Optional[int] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowercase (_A , _A=1 , _A=2_5_6 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def lowercase (_A ):
"""simple docstring"""
with open(_A , 'r' ) as f:
return json.load(_A )
def lowercase (_A , _A ):
"""simple docstring"""
with open(_A , 'w' ) as f:
json.dump(_A , _A )
def lowercase (_A , _A , _A , _A=True ):
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' )
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) )
_lowerCAmelCase : List[str] = NUM_SHARDS[model_size]
_lowerCAmelCase : str = params['n_layers']
_lowerCAmelCase : Optional[int] = params['n_heads']
_lowerCAmelCase : int = n_heads // num_shards
_lowerCAmelCase : Optional[int] = params['dim']
_lowerCAmelCase : Union[str, Any] = dim // n_heads
_lowerCAmelCase : Union[str, Any] = 10_000.0
_lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
_lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase : Union[str, Any] = n_heads
_lowerCAmelCase : Any = n_heads_per_shard
_lowerCAmelCase : Optional[Any] = dim
# permute for sliced rotary
def permute(_A , _A=n_heads , _A=dim , _A=dim ):
return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A )
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase : List[Any] = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
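# Hedged CLI sketch mirroring the argparse flags defined above (the script
# filename and paths are placeholders):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /path/to/llama-7b-hf
#
# The converted checkpoint then loads through the regular transformers API:
#   from transformers import LlamaForCausalLM
#   model = LlamaForCausalLM.from_pretrained("/path/to/llama-7b-hf")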
| 25 | 0 |
'''simple docstring'''
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set_counts
_lowerCAmelCase : List[Any] = max(lowercase__ )
_lowerCAmelCase : Dict = len(lowercase__ )
_lowerCAmelCase : Any = [1] * num_sets
_lowerCAmelCase : Any = list(range(lowercase__ ) )
def a ( self , snake_case__ , snake_case__ ):
'''Union the sets containing src and dst by rank; return False if they already share a root, otherwise update set counts and max_set.'''
_lowerCAmelCase : List[str] = self.get_parent(lowercase__ )
_lowerCAmelCase : Dict = self.get_parent(lowercase__ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Dict = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
_lowerCAmelCase : List[Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
_lowerCAmelCase : str = 0
_lowerCAmelCase : int = src_parent
_lowerCAmelCase : Optional[Any] = self.set_counts[src_parent]
_lowerCAmelCase : Tuple = max(self.max_set , lowercase__ )
return True
def a ( self , snake_case__ ):
'''Return the root of disj_set's set, compressing the path along the way.'''
if self.parents[disj_set] == disj_set:
return disj_set
self.parents[disj_set] = self.get_parent(self.parents[disj_set] )  # path compression
return self.parents[disj_set]
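# Hedged usage sketch (class and method names follow the original
# TheAlgorithms implementation that the mangled names above stand in for):
#   ds = DisjointSet([1, 1, 1])   # three singleton sets
#   ds.merge(0, 1)                # -> True, largest set now has 2 elements
#   ds.merge(1, 2)                # -> True, one set of size 3
#   ds.merge(0, 2)                # -> False, already connected
#   assert ds.max_set == 3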
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
| 25 | 0 |
'''simple docstring'''
from math import factorial
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = real
if isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Dict = [1] * rank
else:
_lowerCAmelCase : str = rank
def __repr__( self ):
'''simple docstring'''
return (
F'{self.real}+'
F'{"+".join(str(snake_case__ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , snake_case__ )
def __add__( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
return Dual(self.real + other , self.duals )
_lowerCAmelCase : Tuple = self.duals.copy()
_lowerCAmelCase : Optional[int] = other.duals.copy()
if len(s_dual ) > len(o_dual ):
o_dual.extend([0] * (len(s_dual ) - len(o_dual )) )  # absent dual coefficients are zero
elif len(s_dual ) < len(o_dual ):
s_dual.extend([0] * (len(o_dual ) - len(s_dual )) )
_lowerCAmelCase : int = []
for i in range(len(snake_case__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , snake_case__ )
__magic_name__ = __add__
def __sub__( self , snake_case__ ):
'''simple docstring'''
return self + other * -1
def __mul__( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , snake_case__ )
_lowerCAmelCase : Optional[int] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , snake_case__ )
__magic_name__ = __mul__
def __truediv__( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : List[Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , snake_case__ )
raise ValueError
def __floordiv__( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Dict = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , snake_case__ )
raise ValueError
def __pow__( self , snake_case__ ):
'''simple docstring'''
if n < 0 or isinstance(n , float ):  # the exponent must be a non-negative integer
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
_lowerCAmelCase : Dict = self
for _ in range(n - 1 ):
x *= self
return x
def lowercase (_A , _A , _A ):
"""simple docstring"""
if not callable(_UpperCAmelCase ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(_UpperCAmelCase , (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('differentiate() requires an int as input for order' )
_lowerCAmelCase : str = Dual(_UpperCAmelCase , 1 )
_lowerCAmelCase : Tuple = func(_UpperCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def f (y ):
"""Example function f(y) = y**6 used to exercise differentiate()."""
return y**2 * y**4
print(differentiate(f, 9, 2))
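# Worked check (hedged): f(y) = y**2 * y**4 = y**6, whose second derivative is
# 30 * y**4, so the print above yields 30 * 9**4 = 196830. Smaller sanity
# checks with the same machinery:
#   differentiate(lambda y: y**2, 3, 1) == 6   # first derivative of y^2 at 3
#   differentiate(lambda y: y**2, 3, 0) == 9   # order 0 just evaluates f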
| 358 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint (checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    """simple docstring"""
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]
    model = LukeModel(config=config ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
    if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
        raise ValueError(
            'Unexpected keys'
            f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (3_9, 4_2)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='pt' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 4_2, 1_0_2_4) )
        expected_slice = torch.tensor(
            [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
    else:  # base
        expected_shape = torch.Size((1, 4_2, 7_6_8) )
        expected_slice = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1_0_2_4) )
        expected_slice = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 7_6_8) )
        expected_slice = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab (entity_vocab_path ):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split('\t' )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
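    # Illustrative invocation (paths are placeholders, not from the original file):
    # python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
    #     --checkpoint_path luke.bin --metadata_path metadata.json \
    #     --entity_vocab_path entity_vocab.tsv \
    #     --pytorch_dump_folder_path ./luke-hf --model_size base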
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
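# Sketch of the effect (assuming this file is the resnet model __init__.py):
# `from transformers.models.resnet import ResNetModel` now resolves lazily, so
# the torch/TF/Flax submodules are only imported when their symbols are accessed.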
| 25 | 0 |
'''simple docstring'''
def circle_sort (collection ):
    """simple docstring"""
    if len(collection ) < 2:
        return collection

    def circle_sort_util (collection , low , high ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left] , collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left] , collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
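# Quick sanity check (illustrative, not part of the original script):
# assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]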
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
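# Illustrative usage (not in the original file): with the defaults above,
# NatConfig().hidden_size == int(64 * 2 ** 3) == 512 and
# NatConfig().stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4'].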
| 25 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (7_20, 12_80)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_00
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 2_50
def main ():
    """simple docstring"""
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image , new_annos , path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(3_2 )
        file_name = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cv2.imwrite(f'{file_root}.jpg' , new_image , [cv2.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj )
        with open(f'{file_root}.txt' , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def get_dataset (label_dir , img_dir ):
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n' ).split(' ' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno (all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
    """simple docstring"""
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box smaller than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars (number_char ):
    """simple docstring"""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
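# Illustrative usage (not part of the original file):
# tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
# tok(["Hello", "world"], is_split_into_words=True)  # pretokenized input is now allowed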
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    """simple docstring"""

    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        '''simple docstring'''
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict (config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbotSmallModelIntegrationTests(unittest.TestCase ):
    """simple docstring"""

    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer( self ):
        '''simple docstring'''
        return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )

    @cached_property
    def model( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def test_90_generation_from_long_input( self ):
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 362 |
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort (sequence ):
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low] , sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid] , sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
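# Quick sanity check (illustrative, not part of the original script):
# assert dutch_national_flag_sort([2, 0, 1, 0, 2]) == [0, 0, 1, 2, 2]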
| 25 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys (config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'module.blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'module.blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'module.blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'module.blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'module.blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'module.blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'module.blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'module.blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'module.blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'module.blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v (state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'module.blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ (state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head (state_dict ):
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key (dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint (checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_0_0_0
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors='pt' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
    else:
        expected_slice = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4 )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 363 |
'''simple docstring'''
def solution ():
    """simple docstring"""
    days_per_month = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    day = 6
    month = 1
    year = 1_9_0_1
    sundays = 0
    while year < 2_0_0_1:
        day += 7
        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                day = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 1_2:
            year += 1
            month = 1
        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays
print(solution())
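# Known result for Project Euler 19 (illustrative comment, not in the original):
# solution() == 171 Sundays fell on the first of the month during the 20th century.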
| 25 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def roberta_dict_integration_testing( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=False ) , [0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=False ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
    @slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs( self ):
        '''simple docstring'''
        pass
    def test_embeded_special_tokens( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
    def test_change_add_prefix_space_and_trim_offsets_args( self ):
        '''simple docstring'''
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , add_prefix_space )
            self.assertEqual(post_processor_state['add_prefix_space'] , add_prefix_space )
            self.assertEqual(post_processor_state['trim_offsets'] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 364 |
'''simple docstring'''
def solution (limit = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
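# Illustrative note (not in the original): phi[n] accumulates Euler's product
# phi(n) = n * prod(1 - 1/p) over the primes p dividing n, so sum(phi[2:])
# counts the reduced proper fractions with denominator <= limit (Project Euler 72).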
| 25 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Any = UNetaDConditionModel(**_lowercase )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Any = self.dummy_movq
_lowerCAmelCase : Tuple = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_lowerCAmelCase : Tuple = DDIMScheduler(**_lowercase )
_lowerCAmelCase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase )
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowercase )
# create init_image
_lowerCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[str] = Image.fromarray(np.uinta(_lowercase ) ).convert('RGB' ).resize((256, 256) )
if str(_lowercase ).startswith('mps' ):
_lowerCAmelCase : Any = torch.manual_seed(_lowercase )
else:
_lowerCAmelCase : Any = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
_lowerCAmelCase : Dict = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**_lowercase )
_lowerCAmelCase : Tuple = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(_lowercase ) )
_lowerCAmelCase : Optional[Any] = output.images
_lowerCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
_lowerCAmelCase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Optional[Any] = 'A red cartoon frog, 4k'
_lowerCAmelCase : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
_lowerCAmelCase : Optional[int] = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
_lowerCAmelCase : Union[str, Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Any = pipe_prior(
_lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : List[str] = pipeline(
image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
_lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 365 |
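For context, a sketch of how this image-to-image pipeline is driven outside the test harness, using the checkpoint ids from the slow test above. The class names are written as they appear upstream in diffusers (the copies above have digits stripped from identifiers), so treat them as assumed:

import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
# The prior turns the text prompt into image embeddings for the decoder.
image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k", num_inference_steps=25).to_tuple()
frog = pipe(
    image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds,
    strength=0.2, height=768, width=768, num_inference_steps=50,
).images[0]
frog.save("frog.png")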
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
    search = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
    def noop(x ):
        return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def lowercase (_A ):
"""simple docstring"""
def _replace(_A ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _lowerCAmelCase : Dict = sort_objects(_A , key=lambda x : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
        _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_A ) )
def lowercase (_A=True ):
"""simple docstring"""
_lowerCAmelCase : int = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 25 | 0 |
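A self-contained illustration of the three-bucket ordering the sorter above implements: uppercase constants first, then CamelCase classes, then lowercase functions, each bucket sorted case-insensitively with underscores ignored.

def style_key(name):
    # Mirrors ignore_underscore above: case-insensitive, underscores dropped.
    return name.lower().replace("_", "")

names = ["load_tf_weights", "BertModel", "BERT_PRETRAINED_MAP", "AutoConfig", "add_start_docstrings"]
constants = sorted([n for n in names if n.isupper()], key=style_key)
classes = sorted([n for n in names if n[0].isupper() and not n.isupper()], key=style_key)
functions = sorted([n for n in names if not n[0].isupper()], key=style_key)
print(constants + classes + functions)
# -> ['BERT_PRETRAINED_MAP', 'AutoConfig', 'BertModel', 'add_start_docstrings', 'load_tf_weights']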
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowerCAmelCase : Any = """examples/"""
lowerCAmelCase : List[Any] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
lowerCAmelCase : Any = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
lowerCAmelCase : Optional[int] = """README.md"""
def lowercase (_A , _A , _A ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase : List[Any] = f.read()
_lowerCAmelCase , _lowerCAmelCase : List[str] = REPLACE_PATTERNS[pattern]
_lowerCAmelCase : str = replace.replace('VERSION' , SCREAMING_SNAKE_CASE__ )
_lowerCAmelCase : Union[str, Any] = re_pattern.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
def lowercase (_A ):
"""simple docstring"""
for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , pattern='examples' )
def lowercase (_A , _A=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not patch:
update_version_in_examples(SCREAMING_SNAKE_CASE__ )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : str = '🤗 Transformers currently provides the following architectures'
_lowerCAmelCase : Optional[int] = '1. Want to contribute a new model?'
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase : Optional[int] = f.readlines()
# Find the start of the list.
_lowerCAmelCase : str = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_lowerCAmelCase : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
_lowerCAmelCase : int = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(SCREAMING_SNAKE_CASE__ )
def lowercase ():
"""simple docstring"""
with open(REPLACE_FILES['init'] , 'r' ) as f:
_lowerCAmelCase : Optional[Any] = f.read()
_lowerCAmelCase : Any = REPLACE_PATTERNS['init'][0].search(SCREAMING_SNAKE_CASE__ ).groups()[0]
return packaging.version.parse(SCREAMING_SNAKE_CASE__ )
def lowercase (_A=False ):
"""simple docstring"""
_lowerCAmelCase : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
_lowerCAmelCase : Tuple = default_version.base_version
elif patch:
_lowerCAmelCase : str = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
_lowerCAmelCase : Tuple = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
_lowerCAmelCase : Any = input(f'Which version are you releasing? [{default_version}]' )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
_lowerCAmelCase : Any = default_version
print(f'Updating version to {version}.' )
global_version_update(SCREAMING_SNAKE_CASE__ , patch=SCREAMING_SNAKE_CASE__ )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = get_version()
_lowerCAmelCase : Dict = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
_lowerCAmelCase : Optional[Any] = current_version.base_version
# Check with the user we got that right.
_lowerCAmelCase : List[Any] = input(f'Which version are we developing now? [{dev_version}]' )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
_lowerCAmelCase : Optional[int] = dev_version
print(f'Updating version to {version}.' )
global_version_update(SCREAMING_SNAKE_CASE__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
lowerCAmelCase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 366 |
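A minimal demonstration of how a REPLACE_PATTERNS entry rewrites a version string in place, mirroring what update_version_in_file does to a file's contents:

import re

re_pattern = re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE)
code = 'import foo\n\n__version__ = "0.17.0.dev0"\n'
# Substitutes the whole assignment line, dropping the dev suffix.
print(re_pattern.sub('__version__ = "0.17.0"', code))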
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) )
# create mask
_lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa )
_lowerCAmelCase : Dict = 0
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 25 | 0 |
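The inpainting mask convention used by these tests: a float array of ones, with zeros marking the region to repaint (the slow test zeroes the strip where the hat is generated). A small sketch of building such a mask:

import numpy as np

mask = np.ones((64, 64), dtype=np.float32)
mask[:16, :] = 0  # repaint the top quarter; an illustrative choice
print(int(mask.sum()))  # 64*64 - 16*64 = 3072 pixels kept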
'''simple docstring'''
lowerCAmelCase : Optional[int] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
lowerCAmelCase : Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort (graph , vert , visited ):
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components (reversed_graph , vert , visited ):
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components (graph ):
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
| 367 |
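Running the Kosaraju-style routine above on the two sample digraphs; their module-level names were lost in this copy, so the dicts are inlined here, and the entry-point name strongly_connected_components comes from the fix above:

print(strongly_connected_components({0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}))
# -> [[0, 1, 2], [3], [4]]
print(strongly_connected_components({0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}))
# -> [[0, 2, 1], [3, 5, 4]]  (one list per set of mutually reachable vertices)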
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix (postfix_notation ):
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
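The evaluator above on small expressions; note the pop order makes the first pop the right-hand operand, which matters for subtraction and division:

print(evaluate_postfix(["2", "1", "+", "3", "*"]))  # (2 + 1) * 3 -> 9
print(evaluate_postfix(["15", "7", "1", "1", "+", "-", "/"]))  # 15 / (7 - (1 + 1)) -> 3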
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase : Union[str, Any] = tuple[int, int, int]
lowerCAmelCase : Dict = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowerCAmelCase : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
lowerCAmelCase : int = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
lowerCAmelCase : List[str] = """FOBHMDKEXQNRAULPGSJVTYICZW"""
lowerCAmelCase : Dict = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
lowerCAmelCase : Dict = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
lowerCAmelCase : str = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
lowerCAmelCase : Dict = """SGLCPQWZHKXAREONTFBVIYJUDM"""
lowerCAmelCase : List[Any] = """HVSICLTYKQUBXDWAJZOMFGPREN"""
lowerCAmelCase : Union[str, Any] = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
lowerCAmelCase : Any = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
lowerCAmelCase : List[str] = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def lowercase (_A , _A , _A ):
"""simple docstring"""
if (unique_rotsel := len(set(a__ ) )) < 3:
_lowerCAmelCase : Optional[int] = f'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(a__ )
# Checks if rotor positions are valid
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = rotpos
if not 0 < rotorposa <= len(a__ ):
_lowerCAmelCase : List[str] = f'First rotor position is not within range of 1..26 ({rotorposa}'
raise ValueError(a__ )
if not 0 < rotorposa <= len(a__ ):
_lowerCAmelCase : Dict = f'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(a__ )
if not 0 < rotorposa <= len(a__ ):
_lowerCAmelCase : Dict = f'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(a__ )
# Validates string and returns dict
_lowerCAmelCase : List[Any] = _plugboard(a__ )
return rotpos, rotsel, pbdict
def lowercase (_A ):
"""simple docstring"""
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = f'Plugboard setting isn\'t type string ({type(a__ )})'
raise TypeError(a__ )
elif len(a__ ) % 2 != 0:
_lowerCAmelCase : Optional[Any] = f'Odd number of symbols ({len(a__ )})'
raise Exception(a__ )
elif pbstring == "":
return {}
pbstring.replace(' ' , '' )
# Checks if all characters are unique
_lowerCAmelCase : Optional[int] = set()
for i in pbstring:
if i not in abc:
_lowerCAmelCase : Union[str, Any] = f'\'{i}\' not in list of symbols'
raise Exception(a__ )
elif i in tmppbl:
_lowerCAmelCase : str = f'Duplicate symbol ({i})'
raise Exception(a__ )
else:
tmppbl.add(a__ )
del tmppbl
# Created the dictionary
_lowerCAmelCase : List[Any] = {}
for j in range(0 , len(a__ ) - 1 , 2 ):
_lowerCAmelCase : Optional[int] = pbstring[j + 1]
_lowerCAmelCase : List[str] = pbstring[j]
return pb
def lowercase (_A , _A , _A = (rotora, rotora, rotora) , _A = "" , ):
"""simple docstring"""
_lowerCAmelCase : str = text.upper()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = _validator(
a__ , a__ , plugb.upper() )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = rotor_position
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_lowerCAmelCase : Any = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowerCAmelCase : Optional[int] = plugboard[symbol]
# rotor ra --------------------------
_lowerCAmelCase : Union[str, Any] = abc.index(a__ ) + rotorposa
_lowerCAmelCase : Optional[int] = rotora[index % len(a__ )]
# rotor rb --------------------------
_lowerCAmelCase : List[Any] = abc.index(a__ ) + rotorposa
_lowerCAmelCase : Dict = rotora[index % len(a__ )]
# rotor rc --------------------------
_lowerCAmelCase : Optional[int] = abc.index(a__ ) + rotorposa
_lowerCAmelCase : Optional[Any] = rotora[index % len(a__ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowerCAmelCase : Optional[int] = reflector[symbol]
# 2nd rotors
_lowerCAmelCase : Tuple = abc[rotora.index(a__ ) - rotorposa]
_lowerCAmelCase : Union[str, Any] = abc[rotora.index(a__ ) - rotorposa]
_lowerCAmelCase : Optional[int] = abc[rotora.index(a__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowerCAmelCase : List[str] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(a__ ):
_lowerCAmelCase : Dict = 0
rotorposa += 1
if rotorposa >= len(a__ ):
_lowerCAmelCase : Union[str, Any] = 0
rotorposa += 1
if rotorposa >= len(a__ ):
_lowerCAmelCase : str = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(a__ )
return "".join(a__ )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = """This is my Python script that emulates the Enigma machine from WWII."""
lowerCAmelCase : str = (1, 1, 1)
lowerCAmelCase : Tuple = """pictures"""
lowerCAmelCase : Tuple = (rotora, rotora, rotora)
lowerCAmelCase : Tuple = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 368 |
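A self-contained sketch of the plugboard mapping built by the validator above: each adjacent pair of letters becomes a symmetric substitution (the call site upper-cases the string first):

pbstring = "PICTURES"
pb = {}
for j in range(0, len(pbstring) - 1, 2):
    # Each pair maps both ways, as in the loop at the end of _plugboard.
    pb[pbstring[j]] = pbstring[j + 1]
    pb[pbstring[j + 1]] = pbstring[j]
print(pb)  # {'P': 'I', 'I': 'P', 'C': 'T', 'T': 'C', 'U': 'R', 'R': 'U', 'E': 'S', 'S': 'E'}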
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mobilenet_v2"
def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : List[Any] = depth_multiplier
_lowerCAmelCase : List[Any] = depth_divisible_by
_lowerCAmelCase : Optional[Any] = min_depth
_lowerCAmelCase : str = expand_ratio
_lowerCAmelCase : str = output_stride
_lowerCAmelCase : Any = first_layer_is_expansion
_lowerCAmelCase : int = finegrained_output
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = tf_padding
_lowerCAmelCase : Optional[int] = classifier_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = semantic_loss_ignore_index
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
return 1E-4
| 25 | 0 |
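The depth_multiplier/depth_divisible_by/min_depth trio controls MobileNet-style channel rounding. A standard make_divisible sketch, not taken verbatim from this file, showing the rule those fields feed:

def make_divisible(value, divisor=8, min_value=None):
    # Round channel counts to the nearest multiple of `divisor`, never below
    # `min_value`, and never more than 10% below the requested width.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

print(make_divisible(32 * 1.4))  # a 1.4x multiplier on 32 channels -> 48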
'''simple docstring'''
import math
def real_power (apparent_power , power_factor ):
    """simple docstring"""
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * power_factor
def reactive_power (apparent_power , power_factor ):
    """simple docstring"""
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
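A worked example for the two helpers above (names restored in the fix): with apparent power S = 100 VA and power factor 0.9, real power is P = S * pf and reactive power is Q = S * sqrt(1 - pf**2).

print(real_power(100, 0.9))      # -> 90.0 W
print(reactive_power(100, 0.9))  # -> ~43.589 VAR, i.e. 100 * sqrt(1 - 0.81)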
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Optional[int] = 'pt'
_lowerCAmelCase : Tuple = 'tf'
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ )
model_tf.save_pretrained(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : Any = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase : int = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase : str = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
| 25 | 0 |
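The resolution order these tests pin down: an explicit framework wins, then the framework of a local checkpoint, then whichever of torch/TF is installed, with torch preferred when both are. A condensed sketch of that rule (names here are illustrative, not the library API):

def pick_framework(explicit=None, local_ckpt=None, torch_ok=True, tf_ok=True):
    if explicit is not None:
        return explicit
    if local_ckpt is not None:
        return local_ckpt  # framework inferred from the saved checkpoint files
    if torch_ok:
        return "pt"
    if tf_ok:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is available.")

print(pick_framework(torch_ok=False))  # -> 'tf'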
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
__magic_name__ = "CIDAS/clipseg-rd64-refined"
__magic_name__ = "image_segmenter"
__magic_name__ = CLIPSegForImageSegmentation
__magic_name__ = ["image", "text"]
__magic_name__ = ["image"]
def __init__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(self , ['vision'] )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
def a ( self , snake_case__ ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : List[Any] = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
def a ( self , snake_case__ ):
'''simple docstring'''
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 370 |
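What the decode step above produces: logits thresholded at zero into a binary mask, then scaled to an 8-bit image. A self-contained sketch with a random stand-in for the model output (352x352 is assumed as a plausible CLIPSeg resolution):

import numpy as np
from PIL import Image

logits = np.random.randn(352, 352).astype(np.float32)  # stand-in for the model output
mask = (logits > 0).astype(np.uint8) * 255              # 0 where logit <= 0, 255 elsewhere
Image.fromarray(mask).save("mask.png")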
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 25 | 0 |
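A typical translation round with this tokenizer, following the upstream NLLB examples (checkpoint availability is assumed): set src_lang at load time, then force the target language code as the first generated token.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
# Forcing the BOS token to the target language code selects the output language.
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])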
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
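# A compact sketch of the lazy-import pattern used above (a hypothetical stand-in,
# not the real `_LazyModule`): attribute access triggers the actual submodule import
# and caches the result on the module object.
import importlib
import types
class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value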
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def lowercase (_A ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase : Dict = parser.parse_args()
if args.check_lib:
lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
lowerCAmelCase : int = Path(transformers_module.__file__).parent
else:
lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowercase (_A , _A , _A , _A , _A , ):
"""simple docstring"""
_lowerCAmelCase : int = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the reference grid
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the action grid
_lowerCAmelCase : Tuple = init[0]
_lowerCAmelCase : List[Any] = init[1]
_lowerCAmelCase : Optional[Any] = 0
    _lowerCAmelCase : Optional[Any] = g + heuristic[x][y] # f = g + h: estimated total cost from start to goal through this cell
_lowerCAmelCase : List[str] = [[f, g, x, y]]
_lowerCAmelCase : Tuple = False # flag that is set when search is complete
    _lowerCAmelCase : Union[str, Any] = False # flag set if we cannot expand any further
while not found and not resign:
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('Algorithm is unable to find solution' )
        else: # expand the open cell with the smallest f-value so as to move closer to the goal
cell.sort()
cell.reverse()
_lowerCAmelCase : Dict = cell.pop()
_lowerCAmelCase : Tuple = next_cell[2]
_lowerCAmelCase : str = next_cell[3]
_lowerCAmelCase : List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
_lowerCAmelCase : Optional[Any] = True
else:
for i in range(len(_SCREAMING_SNAKE_CASE ) ): # to try out different valid actions
_lowerCAmelCase : Union[str, Any] = x + DIRECTIONS[i][0]
_lowerCAmelCase : Optional[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
_lowerCAmelCase : Any = g + cost
_lowerCAmelCase : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : List[str] = i
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[Any] = goal[0]
_lowerCAmelCase : str = goal[1]
    invpath.append([x, y] ) # we build the path backwards, starting from the goal
while x != init[0] or y != init[1]:
_lowerCAmelCase : Optional[int] = x - DIRECTIONS[action[x][y]][0]
_lowerCAmelCase : Optional[int] = y - DIRECTIONS[action[x][y]][1]
_lowerCAmelCase : Optional[Any] = xa
_lowerCAmelCase : List[str] = ya
invpath.append([x, y] )
_lowerCAmelCase : Tuple = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
path.append(invpath[len(_SCREAMING_SNAKE_CASE ) - 1 - i] )
return path, action
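# Quick sanity check (a sketch, not part of the original algorithm): consecutive
# cells in the returned path must be 4-neighbours and free (value 0) in the grid.
def is_valid_path(path, grid):
    for (ra, ca), (rb, cb) in zip(path, path[1:]):
        if [rb - ra, cb - ca] not in DIRECTIONS or grid[rb][cb] == 1:
            return False
    return True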
if __name__ == "__main__":
lowerCAmelCase = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCAmelCase = [0, 0]
# all coordinates are given in format [y,x]
lowerCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
lowerCAmelCase = 1
# the cost map which pushes the path closer to the goal
lowerCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCAmelCase = 99
lowerCAmelCase , lowerCAmelCase = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 350 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
_lowerCAmelCase : List[str] = ''
_lowerCAmelCase : Any = ''
    # append each character followed by "|" to new_input_string, for indices 0 .. length-2
for i in input_string[: len(_A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )]
    # for each center position in new_input_string, find the longest palindrome around it
_lowerCAmelCase : Any = 0
for j in range(len(_A ) ):
_lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_A )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase : List[str] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to span this palindrome
if j + k - 1 > r:
_lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741
_lowerCAmelCase : int = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase : Dict = length[j]
_lowerCAmelCase : Optional[int] = j
# create that string
_lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
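# A brute-force cross-check (O(n^3) sketch) for the linear-time Manacher routine above:
def longest_palindrome_naive(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best
assert longest_palindrome_naive("abacab") == "bacab"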
| 25 | 0 |
'''simple docstring'''
from collections.abc import Callable
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : float = a
_lowerCAmelCase : float = b
    if function(UpperCamelCase__ ) == 0: # one of a or b is itself a root of the function
return a
elif function(UpperCamelCase__ ) == 0:
return b
elif (
function(UpperCamelCase__ ) * function(UpperCamelCase__ ) > 0
    ): # if neither endpoint is a root and f(a) and f(b) have the same sign,
        # then this algorithm can't find a root in the interval
raise ValueError('could not find root in given interval.' )
else:
_lowerCAmelCase : float = start + (end - start) / 2.0
        while abs(start - mid ) > 1_0**-7: # iterate until the interval width shrinks below 10^-7
if function(UpperCamelCase__ ) == 0:
return mid
elif function(UpperCamelCase__ ) * function(UpperCamelCase__ ) < 0:
_lowerCAmelCase : Tuple = mid
else:
_lowerCAmelCase : Dict = mid
_lowerCAmelCase : Optional[int] = start + (end - start) / 2.0
return mid
def lowercase (_A ):
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
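# Worked check (sketch): for f(x) = x**3 - 2*x - 5, f(2) = -1 and f(3) = 16, so the
# sign change brackets a root; bisection converges to x ~ 2.0945514815 once the
# interval shrinks below the 10**-7 tolerance used above.
def _f_demo(x: float) -> float:
    return x**3 - 2 * x - 5
assert _f_demo(2) == -1 and _f_demo(3) == 16  # opposite signs bracket the root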
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = False
__magic_name__ = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : List[Any] = """"""
lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
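# Minimal sketch of the to_kwargs() contract exercised above (a stand-in dataclass,
# not accelerate's KwargsHandler): only fields that differ from the defaults are returned.
from dataclasses import dataclass as _dataclass, fields as _fields
@_dataclass
class _SketchHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0
    def to_kwargs(self):
        default = _SketchHandler()
        return {
            f.name: getattr(self, f.name)
            for f in _fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }
assert _SketchHandler(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}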
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Tuple = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
lowerCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = action_weight
_lowerCAmelCase : Optional[int] = reward_weight
_lowerCAmelCase : Union[str, Any] = value_weight
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = block_size
_lowerCAmelCase : List[Any] = action_dim
_lowerCAmelCase : List[Any] = observation_dim
_lowerCAmelCase : Union[str, Any] = transition_dim
_lowerCAmelCase : Tuple = learning_rate
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Tuple = n_embd
_lowerCAmelCase : Optional[Any] = embd_pdrop
_lowerCAmelCase : Union[str, Any] = attn_pdrop
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
_lowerCAmelCase : List[Any] = use_cache
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
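# Hypothetical round trip (attribute names taken from the attribute_map above):
# constructing the config with n_layer=4 makes `config.num_hidden_layers` resolve
# to 4, because PretrainedConfig translates mapped attribute names on access.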
| 25 | 0 |
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = psutil.Process()
_lowerCAmelCase : int = False
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = -1
while True:
_lowerCAmelCase : Tuple = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = True
_lowerCAmelCase : Any = threading.Thread(target=self.peak_monitor )
_lowerCAmelCase : List[str] = True
self.thread.start()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = False
self.thread.join()
return self.cpu_memory_peak
lowerCAmelCase : List[str] = PeakCPUMemory()
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[int] = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_lowerCAmelCase : Any = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
_lowerCAmelCase : List[Any] = torch.cuda.memory_allocated(_lowerCAmelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_lowerCAmelCase : str = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**2_0
_lowerCAmelCase : Optional[int] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**2_0
# GPU mem
for i in range(torch.cuda.device_count() ):
_lowerCAmelCase : Union[str, Any] = (torch.cuda.memory_allocated(_lowerCAmelCase ) - start_measures[str(_lowerCAmelCase )]) / 2**2_0
_lowerCAmelCase : List[Any] = (torch.cuda.max_memory_allocated(_lowerCAmelCase ) - start_measures[str(_lowerCAmelCase )]) / 2**2_0
return measures
def lowercase (_A , _A ):
"""simple docstring"""
print(f'{description}:' )
print(f'- Time: {measures["time"]:.2f}s' )
for i in range(torch.cuda.device_count() ):
print(f'- GPU {i} allocated: {measures[str(_lowerCAmelCase )]:.2f}MiB' )
_lowerCAmelCase : Dict = measures[f'{i}-peak']
print(f'- GPU {i} peak: {peak:.2f}MiB' )
print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB' )
print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB' )
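# Typical measurement loop (sketch; the three helpers above were presumably named
# start_measure / end_measure / log_measures before the identifiers were rewritten):
#
#   measures = start_measure()
#   ...  # run the workload being profiled
#   log_measures(end_measure(measures), "training step")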
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase : Union[str, Any] = 25_00_04
lowerCAmelCase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MBartaaTokenizer
__magic_name__ = MBartaaTokenizerFast
__magic_name__ = True
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '<s>'
_lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1054 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "facebook/mbart-large-50-one-to-many-mmt"
__magic_name__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__magic_name__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def a ( cls ):
'''simple docstring'''
_lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
_lowerCAmelCase : Dict = 1
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
_lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
| 25 | 0 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
if n_term == "":
return []
_lowerCAmelCase : Any = []
for temp in range(int(_A ) ):
series.append(f'1/{temp + 1}' if series else '1' )
return series
if __name__ == "__main__":
lowerCAmelCase : str = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
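# Example of the intended behaviour (sketch): harmonic_series("4") yields
# ['1', '1/2', '1/3', '1/4'] -- the first term is '1', each later term is '1/n'.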
| 354 |
'''simple docstring'''
from math import isqrt
def lowercase (_A ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) )
def lowercase (_A = 1_0**6 ):
"""simple docstring"""
_lowerCAmelCase : str = 0
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[str] = 7
while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
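# Why the candidate jumps by 6 * cube_index (sketch of the arithmetic): consecutive
# cube differences satisfy (k + 1)**3 - k**3 = 3*k*k + 3*k + 1, i.e. 7, 19, 37, 61, ...;
# stepping from the k-th difference to the next adds 6*(k + 1), which is exactly the
# `6 * cube_index` increment applied after the counter is bumped.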
| 25 | 0 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCAmelCase : int = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
lowerCAmelCase : str = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
lowerCAmelCase : Union[str, Any] = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
lowerCAmelCase : Any = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
lowerCAmelCase : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase : Any = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
lowerCAmelCase : Tuple = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def lowercase ():
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase : Dict = randrange(len(__UpperCAmelCase ) ), randrange(len(__UpperCAmelCase ) )
_lowerCAmelCase : Union[str, Any] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowercase (_A = 1_0_0 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(__UpperCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __UpperCAmelCase )
def lowercase (_A , _A ):
"""simple docstring"""
assert PokerHand(__UpperCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __UpperCAmelCase )
def lowercase (_A , _A ):
"""simple docstring"""
assert PokerHand(__UpperCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __UpperCAmelCase )
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Dict = PokerHand(__UpperCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __UpperCAmelCase )
def lowercase (_A , _A ):
"""simple docstring"""
assert PokerHand(__UpperCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __UpperCAmelCase )
def lowercase (_A , _A ):
"""simple docstring"""
assert PokerHand(__UpperCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __UpperCAmelCase )
def lowercase (_A , _A , _A ):
"""simple docstring"""
assert PokerHand(__UpperCAmelCase ).compare_with(PokerHand(__UpperCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def lowercase (_A , _A , _A ):
"""simple docstring"""
assert PokerHand(__UpperCAmelCase ).compare_with(PokerHand(__UpperCAmelCase ) ) == expected
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Tuple = [PokerHand(__UpperCAmelCase ) for hand in SORTED_HANDS]
_lowerCAmelCase : Tuple = poker_hands.copy()
shuffle(__UpperCAmelCase )
_lowerCAmelCase : List[str] = chain(sorted(__UpperCAmelCase ) )
for index, hand in enumerate(__UpperCAmelCase ):
assert hand == poker_hands[index]
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__UpperCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = PokerHand('2C 4S AS 3D 5C' )
_lowerCAmelCase : Any = True
_lowerCAmelCase : Union[str, Any] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Union[str, Any] = os.path.abspath(os.path.dirname(__UpperCAmelCase ) )
_lowerCAmelCase : Any = os.path.join(__UpperCAmelCase , 'poker_hands.txt' )
with open(__UpperCAmelCase ) as file_hand:
for line in file_hand:
_lowerCAmelCase : int = line[:1_4].strip()
_lowerCAmelCase : Union[str, Any] = line[1_5:].strip()
_lowerCAmelCase , _lowerCAmelCase : str = PokerHand(__UpperCAmelCase ), PokerHand(__UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = player.compare_with(__UpperCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 355 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mvp"
__magic_name__ = ["past_key_values"]
__magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[Any] = d_model
_lowerCAmelCase : Optional[int] = encoder_ffn_dim
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Any = decoder_ffn_dim
_lowerCAmelCase : Optional[Any] = decoder_layers
_lowerCAmelCase : int = decoder_attention_heads
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Any = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : Optional[int] = classifier_dropout
_lowerCAmelCase : List[Any] = use_cache
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Optional[Any] = use_prompt
_lowerCAmelCase : Optional[Any] = prompt_length
_lowerCAmelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ):
_lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
| 25 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=32 , snake_case__=2 , snake_case__=3 , snake_case__=16 , snake_case__=[1, 2, 1] , snake_case__=[2, 2, 4] , snake_case__=2 , snake_case__=2.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=False , snake_case__=True , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=10 , snake_case__=8 , ):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : str = depths
_lowerCAmelCase : List[Any] = num_heads
_lowerCAmelCase : str = window_size
_lowerCAmelCase : List[Any] = mlp_ratio
_lowerCAmelCase : Optional[int] = qkv_bias
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : int = use_absolute_embeddings
_lowerCAmelCase : Any = patch_norm
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : int = scope
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : Optional[Any] = encoder_stride
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : int = None
if self.use_labels:
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def a ( self ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = SwinvaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCAmelCase : Any = model(lowerCamelCase__ )
_lowerCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCAmelCase : Any = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.type_sequence_label_size
_lowerCAmelCase : Dict = SwinvaForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCAmelCase : List[str] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__magic_name__ = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = SwinvaModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , embed_dim=37 )
def a ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def a ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : str = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(lowerCamelCase__ )
_lowerCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : str = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = True
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = True
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCAmelCase : Optional[int] = outputs.attentions
_lowerCAmelCase : List[Any] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Optional[Any] = config.window_size**2
_lowerCAmelCase : str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCAmelCase : List[str] = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_lowerCAmelCase : int = len(lowerCamelCase__ )
# Check attention is always last and order is fine
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : str = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
_lowerCAmelCase : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_lowerCAmelCase : str = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase__ ) )
_lowerCAmelCase : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCAmelCase : Any = outputs.hidden_states
_lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# Swinv2 has a different seq_length
_lowerCAmelCase : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = reshaped_hidden_states[0].shape
_lowerCAmelCase : Optional[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : str = 3
_lowerCAmelCase : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowerCAmelCase : str = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def a ( self ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = SwinvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[int] = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
lowerCamelCase__ )
_lowerCAmelCase : Optional[int] = self.default_image_processor
_lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase : Union[str, Any] = image_processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**lowerCamelCase__ )
# verify the logits
_lowerCAmelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCAmelCase : Optional[int] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
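# Note on the attention-shape assertions above (added; hedged): Swin-style models
# compute self-attention inside local windows, so each attention map has shape
# [batch * num_windows, num_heads, window_size**2, window_size**2] rather than
# spanning the full sequence -- hence the window_size_squared dimensions checked
# in the list assertions above.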
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
lowerCAmelCase : Optional[int] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowercase (_A , _A=1 , _A=2_5_6 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
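# Worked example (added for clarity; hedged, not from the original source): with
# the defaults ffn_dim_multiplier=1 and multiple_of=256, a 7B model with n = 4096
# gives int(8 * 4096 / 3) = 10922, which rounds up to the next multiple of 256:
# 11008 -- exactly the "7B" entry (1_10_08) in the first lookup table above.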
def lowercase (_A ):
"""simple docstring"""
with open(_A , 'r' ) as f:
return json.load(_A )
def lowercase (_A , _A ):
"""simple docstring"""
with open(_A , 'w' ) as f:
json.dump(_A , _A )
def lowercase (_A , _A , _A , _A=True ):
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' )
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) )
_lowerCAmelCase : List[str] = NUM_SHARDS[model_size]
_lowerCAmelCase : str = params['n_layers']
_lowerCAmelCase : Optional[int] = params['n_heads']
_lowerCAmelCase : int = n_heads // num_shards
_lowerCAmelCase : Optional[int] = params['dim']
_lowerCAmelCase : Union[str, Any] = dim // n_heads
_lowerCAmelCase : Union[str, Any] = 10_000.0
_lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
_lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase : Union[str, Any] = n_heads
_lowerCAmelCase : Any = n_heads_per_shard
_lowerCAmelCase : Optional[Any] = dim
# permute for sliced rotary
def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
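# Note (added; hedged): the checkpoint stores each head's rotary q/k rows with the
# two halves of every rotary pair interleaved; the view/transpose/reshape above
# regroups them into the contiguous half-split layout the converted model expects,
# leaving the overall (dim1, dim2) shape unchanged.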
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase : List[Any] = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
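# Illustrative CLI invocation (added example; the script filename is an assumption,
# the flags come from the argparse definitions above):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/llama_weights \
#       --model_size 7B \
#       --output_dir /path/to/hf_output
#
# Passing --model_size tokenizer_only skips the weight conversion and only writes
# the tokenizer, mirroring the branch in main() above.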
| 25 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class UpperCamelCase__ ( a_ ):
"""simple docstring"""
__magic_name__ = "SpeechT5FeatureExtractor"
__magic_name__ = "SpeechT5Tokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(lowercase_ , lowercase_ )
def __call__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = kwargs.pop('audio' , lowercase_ )
_lowerCAmelCase : Tuple = kwargs.pop('text' , lowercase_ )
_lowerCAmelCase : Optional[int] = kwargs.pop('text_target' , lowercase_ )
_lowerCAmelCase : Tuple = kwargs.pop('audio_target' , lowercase_ )
_lowerCAmelCase : List[Any] = kwargs.pop('sampling_rate' , lowercase_ )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
_lowerCAmelCase : Tuple = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
elif text is not None:
_lowerCAmelCase : Any = self.tokenizer(lowercase_ , **lowercase_ )
else:
_lowerCAmelCase : Dict = None
if audio_target is not None:
_lowerCAmelCase : int = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
_lowerCAmelCase : Tuple = targets['input_values']
elif text_target is not None:
_lowerCAmelCase : Union[str, Any] = self.tokenizer(lowercase_ , **lowercase_ )
_lowerCAmelCase : int = targets['input_ids']
else:
_lowerCAmelCase : List[Any] = None
if inputs is None:
return targets
if targets is not None:
_lowerCAmelCase : List[Any] = labels
_lowerCAmelCase : List[Any] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
_lowerCAmelCase : Tuple = decoder_attention_mask
return inputs
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = kwargs.pop('input_values' , lowercase_ )
_lowerCAmelCase : Dict = kwargs.pop('input_ids' , lowercase_ )
_lowerCAmelCase : Dict = kwargs.pop('labels' , lowercase_ )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
_lowerCAmelCase : List[str] = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
elif input_ids is not None:
_lowerCAmelCase : Optional[Any] = self.tokenizer.pad(lowercase_ , **lowercase_ )
else:
_lowerCAmelCase : Tuple = None
if labels is not None:
if "input_ids" in labels or (isinstance(lowercase_ , lowercase_ ) and "input_ids" in labels[0]):
_lowerCAmelCase : Optional[int] = self.tokenizer.pad(lowercase_ , **lowercase_ )
_lowerCAmelCase : str = targets['input_ids']
else:
_lowerCAmelCase : Any = self.feature_extractor.feature_size
_lowerCAmelCase : Optional[int] = self.feature_extractor.num_mel_bins
_lowerCAmelCase : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
_lowerCAmelCase : Any = feature_size_hack
_lowerCAmelCase : int = targets['input_values']
else:
_lowerCAmelCase : List[Any] = None
if inputs is None:
return targets
if targets is not None:
_lowerCAmelCase : int = labels
_lowerCAmelCase : List[Any] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
_lowerCAmelCase : Any = decoder_attention_mask
return inputs
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
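# Minimal usage sketch (added; hedged -- the checkpoint name and keyword usage are
# assumptions based on the public SpeechT5 release, not part of this file):
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained('microsoft/speecht5_tts')
#   inputs = processor(text='Hello world', return_tensors='pt')
#
# Per __call__ above, `audio`/`text` feed the encoder side, while `audio_target`/
# `text_target` produce labels (and a decoder_attention_mask) for the decoder side.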
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
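# Note (added; hedged): the method above acts as a copy constructor -- every field
# is deep-copied before being passed back into the class, so mutating a clone's
# mutable fields (dicts, lists) can never leak back into the original instance.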
| 25 | 0 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "pixel_values"
__magic_name__ = False
__magic_name__ = TimmBackboneConfig
def __init__( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(self , 'timm' )
super().__init__(__A )
_lowerCAmelCase : Optional[Any] = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(__A , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
_lowerCAmelCase : Union[str, Any] = getattr(__A , 'use_pretrained_backbone' , None )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
_lowerCAmelCase : List[Any] = config.out_indices if getattr(__A , 'out_indices' , None ) is not None else (-1,)
_lowerCAmelCase : Tuple = timm.create_model(
config.backbone , pretrained=__A , features_only=config.features_only , in_chans=config.num_channels , out_indices=__A , **__A , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_lowerCAmelCase : Dict = self._backbone.return_layers
_lowerCAmelCase : List[str] = {layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(__A )
@classmethod
def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
_lowerCAmelCase : Union[str, Any] = kwargs.pop('config' , TimmBackboneConfig() )
_lowerCAmelCase : List[Any] = kwargs.pop('use_timm_backbone' , __A )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
_lowerCAmelCase : Any = kwargs.pop('num_channels' , config.num_channels )
_lowerCAmelCase : str = kwargs.pop('features_only' , config.features_only )
_lowerCAmelCase : List[Any] = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
_lowerCAmelCase : str = kwargs.pop('out_indices' , config.out_indices )
_lowerCAmelCase : Optional[Any] = TimmBackboneConfig(
backbone=__A , num_channels=__A , features_only=__A , use_pretrained_backbone=__A , out_indices=__A , )
return super()._from_config(__A , **__A )
def a ( self , snake_case__ ):
'''simple docstring'''
pass
def a ( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : str = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_lowerCAmelCase : Union[str, Any] = self._all_layers
_lowerCAmelCase : Tuple = self._backbone(__A , **__A )
_lowerCAmelCase : Optional[int] = self._return_layers
_lowerCAmelCase : List[str] = tuple(hidden_states[i] for i in self.out_indices )
else:
_lowerCAmelCase : Optional[Any] = self._backbone(__A , **__A )
_lowerCAmelCase : Any = None
_lowerCAmelCase : Union[str, Any] = tuple(__A )
_lowerCAmelCase : Optional[Any] = tuple(__A ) if hidden_states is not None else None
if not return_dict:
_lowerCAmelCase : List[str] = (feature_maps,)
if output_hidden_states:
_lowerCAmelCase : str = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__A , hidden_states=__A , attentions=__A )
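# Usage sketch (added; hedged -- 'resnet18' is an arbitrary timm model name, and
# TimmBackbone/TimmBackboneConfig are assumed to be the upstream names of the
# classes defined in this file; config fields mirror the checks in __init__):
#
#   import torch
#   config = TimmBackboneConfig(backbone='resnet18', use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.rand(1, 3, 224, 224)).feature_maps
#
# With the default out_indices of (-1,) this returns a one-element tuple holding
# the final stage's feature map.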
| 358 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
import os
def lowercase (_A = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file:
_lowerCAmelCase : Dict = [
[int(_A ) for element in line.split(',' )]
for line in input_file.readlines()
]
_lowerCAmelCase : str = len(_A )
_lowerCAmelCase : List[Any] = len(matrix[0] )
_lowerCAmelCase : str = [[-1 for _ in range(_A )] for _ in range(_A )]
for i in range(_A ):
_lowerCAmelCase : Dict = matrix[i][0]
for j in range(1 , _A ):
for i in range(_A ):
_lowerCAmelCase : Optional[int] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _A ):
_lowerCAmelCase : Union[str, Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_lowerCAmelCase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
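# Worked micro-example (added; hedged): for matrix = [[1, 9], [5, 1]] the
# left-to-right pass yields last-column sums [10, 6]; the downward relaxation
# keeps 6 (since 10 + 1 = 11 is worse) and the upward relaxation keeps 10 (since
# 6 + 9 = 15 is worse), so the answer is min(10, 6) = 6 -- the straight path 5 -> 1.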
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
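# Note on the lazy-module pattern above (added; hedged): _LazyModule defers the
# heavy framework imports until an attribute is first touched, e.g.
#
#   from transformers.models.resnet import ResNetConfig   # config only, cheap
#   from transformers.models.resnet import ResNetModel    # first torch import here
#
# and the try/except blocks above simply omit a framework's symbols from
# _import_structure when that framework is unavailable, instead of failing at
# import time.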
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : int = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[str] = 32
_lowerCAmelCase : str = 2
_lowerCAmelCase : List[Any] = 4
_lowerCAmelCase : List[str] = 37
_lowerCAmelCase : Union[str, Any] = 'gelu'
_lowerCAmelCase : List[str] = 0.1
_lowerCAmelCase : List[Any] = 0.1
_lowerCAmelCase : Dict = 512
_lowerCAmelCase : List[Any] = 16
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Optional[int] = 0.02
_lowerCAmelCase : List[str] = 3
_lowerCAmelCase : Tuple = 4
_lowerCAmelCase : Tuple = None
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
_lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : int = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFRoFormerModel(config=lowerCamelCase_ )
_lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : Optional[int] = model(lowerCamelCase_ )
_lowerCAmelCase : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = True
_lowerCAmelCase : Dict = TFRoFormerForCausalLM(config=lowerCamelCase_ )
_lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase : Tuple = model(lowerCamelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFRoFormerForMaskedLM(config=lowerCamelCase_ )
_lowerCAmelCase : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : int = TFRoFormerForSequenceClassification(config=lowerCamelCase_ )
_lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_choices
_lowerCAmelCase : Dict = TFRoFormerForMultipleChoice(config=lowerCamelCase_ )
_lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : Any = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : List[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowerCAmelCase : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Tuple = TFRoFormerForTokenClassification(config=lowerCamelCase_ )
_lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = TFRoFormerForQuestionAnswering(config=lowerCamelCase_ )
_lowerCAmelCase : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFRoFormerModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowerCAmelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase : int = model(lowerCamelCase_ )[0]
# TODO Replace vocab size
_lowerCAmelCase : List[str] = 5_0000
_lowerCAmelCase : Optional[int] = [1, 6, vocab_size]
self.assertEqual(output.shape , lowerCamelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_lowerCAmelCase : Optional[Any] = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = 1E-4
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = tf.constant([[4, 10]] )
_lowerCAmelCase : List[str] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_lowerCAmelCase : List[Any] = emba(input_ids.shape )
_lowerCAmelCase : Dict = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(lowerCamelCase_ , lowerCamelCase_ , atol=self.tolerance )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_lowerCAmelCase : Dict = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
_lowerCAmelCase : Dict = emba.weight[:3, :5]
tf.debugging.assert_near(lowerCamelCase_ , lowerCamelCase_ , atol=self.tolerance )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = 1E-4
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
_lowerCAmelCase : Optional[Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
_lowerCAmelCase : Dict = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_lowerCAmelCase : Any = embed_positions([2, 16, 768] )[None, None, :, :]
_lowerCAmelCase , _lowerCAmelCase : List[str] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase : Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_lowerCAmelCase : Optional[int] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowerCamelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowerCamelCase_ , atol=self.tolerance )
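# Background for the expected values above (added; hedged): the table is the
# standard sinusoidal positional encoding laid out as [sin | cos] halves,
#   emb[pos, i]          = sin(pos / 10000 ** (2 * i / dim))
#   emb[pos, dim//2 + i] = cos(pos / 10000 ** (2 * i / dim))
# e.g. with dim = 6, position 1 gives sin(1) ~ 0.8415, sin(1 / 10000 ** (1/3)) ~
# 0.0464, sin(1 / 10000 ** (2/3)) ~ 0.0022 and cos(1) ~ 0.5403, matching the
# desired_weights tensors; the rotary test then rotates query/key pairs by these
# same angles.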
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
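# Worked example for the derived hidden_size above (added; hedged, assuming the
# upstream class name NatConfig): with the defaults embed_dim=64 and
# depths=[3, 4, 6, 5] (four stages), hidden_size = 64 * 2 ** (4 - 1) = 512, and
# stage_names becomes ['stem', 'stage1', 'stage2', 'stage3', 'stage4'].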
| 25 | 0 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase : List[str] = {"""UserAgent""": UserAgent().random}
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = script.contents[0]
_lowerCAmelCase : str = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = F'https://www.instagram.com/{username}/'
_lowerCAmelCase : int = self.get_json()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = requests.get(self.url , headers=snake_case__ ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(snake_case__ , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self ):
'''simple docstring'''
return F'{self.fullname} ({self.username}) is {self.biography}'
@property
def a ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def a ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowercase (_A = "github" ):
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(username )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : Dict = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
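# Usage sketch (added; hedged -- standard Hugging Face API, not part of this file):
#
#   from transformers import RobertaTokenizerFast
#   tok = RobertaTokenizerFast.from_pretrained('roberta-base')
#   enc = tok('Hello world')
#
# enc['input_ids'] starts with <s> (id 0) and ends with </s> (id 2), matching
# build_inputs_with_special_tokens above; only input_ids and attention_mask are
# returned, per the model_input_names declared on the class.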
| 25 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because BEiT's forward signature differs from that of text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because BEiT expects pixel_values instead of input_ids
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('microsoft/beit-base-patch16-224')
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='np').pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )

        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1E-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1E-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1E-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
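# A standalone usage sketch (hedged; the checkpoint name is the one exercised in
# the tests above, everything else is plain transformers/Flax API), kept as
# comments so it does not run with the suite:
#
#     processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
#     model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
#     inputs = processor(images=prepare_img(), return_tensors="np")
#     predicted_class = model(**inputs).logits.argmax(-1).item()  # 281 == "tabby, tabby cat"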
| 362 |
"""Dutch national flag sort: partition a sequence of 0s, 1s and 2s in one pass."""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
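# A quick sanity check (a sketch, not part of the original module): the three
# pointers partition the list in a single in-place pass, e.g.
#     dutch_national_flag_sort([2, 0, 1, 2, 0])  ->  [0, 0, 1, 2, 2]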
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 25 | 0 |
"""Keyword cipher: build a substitution alphabet that starts with a keyword."""


def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict) -> str:
    # Reverse the cipher mappings so each value looks up its key
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 363 |
def solution() -> int:
    """Return how many Sundays fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
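# This is Project Euler problem 19 ("Counting Sundays"): stepping a week at a
# time avoids any calendar library. As a hedged sanity check, the published
# answer to that problem is 171, so solution() should return 171.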
if __name__ == "__main__":
print(solution())
| 25 | 0 |
class PrefixSum:
    """Answer range-sum queries in O(1) after an O(n) prefix build."""

    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
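# Usage sketch (names assumed to match the class above; not part of the
# original module):
#     ps = PrefixSum([1, 2, 3, 4])
#     ps.get_sum(1, 3)    # -> 9, i.e. 2 + 3 + 4
#     ps.contains_sum(6)  # -> True, since 1 + 2 + 3 == 6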
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit."""
    # Sieve of Eratosthenes restricted to the odd numbers
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod of (1 - 1/p) over primes p | n
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
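# This is the approach to Project Euler problem 72: summing phi(d) for
# 2 <= d <= limit counts the reduced proper fractions n/d. A small hedged
# sanity check: solution(8) should give 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.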
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25 | 0 |