import re


def dna_complement(dna: str) -> str:
    """
    Return the complementary strand of a DNA sequence, mapping each base to its
    Watson-Crick pair (A<->T, C<->G). Raises ValueError if the strand contains
    characters other than A, T, C and G.
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
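# Quick usage sketch (illustrative):
#
#     dna_complement("ATCG")  # -> "TAGC"
#     dna_complement("GTAT")  # -> "CATA"
#     dna_complement("GTAX")  # raises ValueError("Invalid Strand")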
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """
        Tests a single `complete` example against all of the implemented `by_feature` scripts.

        Args:
            complete_file_name (`str`): The filename of a complete example.
            parser_only (`bool`): Whether to check `main()` (the argument parser) or `training_function()`.
            secondary_filename (`str`, *optional*): A potential secondary base file, such as "cv_example.py".
            special_strings (`list`, *optional*): File-specific diff strings to strip before comparing.
        """
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
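# Worked example (illustrative): the lazy match ends either at a lower-to-upper
# transition or just before the last capital of an acronym run, so
#
#     camel_case_split("TFBertModel")       # -> ["TF", "Bert", "Model"]
#     camel_case_split("FlaxRobertaModel")  # -> ["Flax", "Roberta", "Model"]
#
# which is exactly what the backend lookup below relies on when it strips one
# trailing word at a time.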
def get_frameworks_table() -> pd.DataFrame:
    """
    Generate a dataframe with all the supported models, whether they have a
    PyTorch/TensorFlow/Flax backend, and their processor class.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's find the right processing class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table: dict) -> dict:
    """Update the table of model class to (pipeline tag, auto class) without removing old keys."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token: str, commit_sha: str):
    """Update the metadata for the Transformers repo on the Hub."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check all pipeline tags are properly defined in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Input/output directories and flip direction for the augmentation run.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    """
    Get the images list and annotations list from the input dir, flip them,
    and save the new images and annotations in the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """
    Collect image paths and their annotations.

    - label_dir: path to the folder with one `.txt` annotation file per image
    - img_dir: path to the folder containing the `.jpg` images
    Returns a tuple (img_paths, labels).
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """
    Flip every image (0 = vertical, 1 = horizontal) and mirror its annotations.

    Returns a tuple (new_imgs_list, new_annos_lists, path_list).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
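# Note on the arithmetic above: the annotations follow the YOLO convention
# `class x_center y_center width height`, with coordinates normalized to [0, 1].
# Mirroring the image therefore only reflects the relevant center coordinate as
# `1 - center` (x for a horizontal flip, y for a vertical one); widths and
# heights are invariant under the flip.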
def random_chars(number_char: int = 32) -> str:
    """Generate a random string of `number_char` lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
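# A minimal usage sketch (illustrative; in practice this class is the backend of
# the public `datasets.Dataset.from_generator` API rather than something users
# instantiate directly):
#
#     def gen():
#         for i in range(3):
#             yield {"id": i}
#
#     ds = GeneratorDatasetInputStream(generator=gen).read()
#     # With streaming=True, read() returns an IterableDataset instead and no
#     # cache is written.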
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
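# Usage sketch (illustrative): `fire.Fire(minify)` exposes the function as a
# command-line interface, so assuming this file is saved as `minify.py`:
#
#     python minify.py path/to/full_dataset path/to/small_dataset 100
#
# writes the first 100 lines of every file in the source directory into the
# destination directory.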
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version: str):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version: str, patch: bool = False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the `__init__`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
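# Usage sketch (illustrative, assuming this script lives at `utils/release.py`
# as in the transformers repo):
#
#     python utils/release.py                 # pre-release: bump to the next X.Y.0
#     python utils/release.py --patch         # pre-release for a patch: bump micro
#     python utils/release.py --post_release  # move back to an X.(Y+1).0.dev0 version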
def catalan_number(number: int) -> int:
    """
    Return the `number`-th element (1-indexed) of the Catalan sequence
    1, 1, 2, 5, 14, 42, ..., computed iteratively from the recurrence
    C(i) = C(i - 1) * (4i - 2) / (i + 1).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
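# Worked example (verifiable by hand): starting from current_number = 1, the
# loop applies the recurrence for i = 1..4, producing 1, 2, 5, 14 in turn, so
# catalan_number(5) == 14, the fifth entry of the sequence 1, 1, 2, 5, 14, 42, ...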
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved print problem by locking this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
__lowerCAmelCase : Tuple = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__lowerCAmelCase : Union[str, Any] = torch.device("cuda", local_rank)
__lowerCAmelCase : str = socket.gethostname()
__lowerCAmelCase : int = F"""[{hostname}-{local_rank}]"""
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
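# Usage sketch (illustrative): this diagnostic is meant to be launched with one
# process per GPU, e.g. for two GPUs on a single node (the file name below is an
# assumption):
#
#     python -m torch.distributed.run --nproc_per_node 2 torch-distributed-gpu-test.py
#
# Each rank prints "[hostname-rank] is OK" once the NCCL all_reduce and the CUDA
# allocation both succeed.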
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
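# For context, a minimal sketch of how the decorator is meant to be used in real
# training code (the helper inside the function body is hypothetical):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_function(batch_size):
#         train_one_epoch(batch_size)  # hypothetical training helper
#
#     training_function()  # retries with 64, 32, ... after each CUDA OOM error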
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """
    Compute the truncated falling product u * (u - 1) * ... * (u - p + 1)
    used by Newton's forward interpolation formula.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
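# ucal(u, p) builds the coefficient products that appear in Newton's
# forward-difference interpolation formula,
#     f(x) ≈ y0 + u·Δy0 + u(u - 1)/2!·Δ²y0 + u(u - 1)(u - 2)/3!·Δ³y0 + ...
# with u = (x - x0) / h; main() below divides each term by math.factorial(i)
# accordingly.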
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
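# Note: the original timm-style ViT blocks store query/key/value as a single
# fused `qkv` matrix of shape (3 * hidden_size, hidden_size); the three equal
# row slices above are the query, key and value projection weights, in that
# order, which is why plain slicing recovers them for the per-projection
# Hugging Face layout.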
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img( checkpoint_url ):
    """simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    __UpperCAmelCase = Image.open(requests.get(__UpperCAmelCase , stream=True ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def convert_tr_ocr_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    encoder_config = ViTConfig(image_size=3_8_4 , qkv_bias=False )
    decoder_config = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = '''relu'''
        decoder_config.max_position_embeddings = 1_0_2_4
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
# load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
model.eval()
# load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , check_hash=True )['''model''']
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''decoder''' ) and "output_projection" not in key:
            state_dict['''decoder.model.''' + key] = val
        else:
            state_dict[key] = val
# load state dict
    model.load_state_dict(state_dict )
# Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''' )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors='''pt''' ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :1_0] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving processor to {pytorch_dump_folder_path}""" )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
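# Hedged usage note (assumption: the script file name; the URL is simply the default
# already declared above). A typical invocation of the converter might look like:
#
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten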
| 654 | 0 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format , is_archive , bza_file , gz_file , lza_file , seven_zip_file , tar_file , xz_file , zip_file , zstd_file , tmp_path , text_file , ):
    """simple docstring"""
    input_paths_and_base_extractors = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
    input_path , base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
    base_extractor.extract(input_path , output_path )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='''utf-8''' )
else:
        extracted_file_content = output_path.read_text(encoding='''utf-8''' )
    expected_file_content = text_file.read_text(encoding='''utf-8''' )
    assert extracted_file_content == expected_file_content
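# Hedged sketch mirroring the test above with one concrete extractor. It builds a
# small gzip file on the fly, so it does not depend on the pytest fixtures; the
# temp-dir handling is an assumption, not part of the test suite.
def _demo_gzip_roundtrip(tmp_path):
    import gzip

    source = tmp_path / "hello.txt.gz"
    with gzip.open(source, "wt", encoding="utf-8") as f:
        f.write("hello extractor")
    assert GzipExtractor.is_extractable(source)
    target = tmp_path / "extracted.txt"
    GzipExtractor.extract(str(source), str(target))
    assert target.read_text(encoding="utf-8") == "hello extractor"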
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format , is_archive , bza_file , gz_file , lza_file , seven_zip_file , tar_file , xz_file , zip_file , zstd_file , tmp_path , text_file , ):
    """simple docstring"""
    input_paths = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
    Extractor.extract(input_path , output_path , extractor_format )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='''utf-8''' )
else:
        extracted_file_content = output_path.read_text(encoding='''utf-8''' )
    expected_file_content = text_file.read_text(encoding='''utf-8''' )
    assert extracted_file_content == expected_file_content
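# Hedged sketch of the magic-number idea behind Extractor.infer_extractor_format:
# most of the formats above announce themselves in their first bytes. This table is
# a simplified assumption — the real implementation in datasets.utils.extract
# handles more cases (e.g. tar's magic sits at offset 257, not at the start).
_DEMO_MAGIC_NUMBERS = {
    b"\x1f\x8b": "gzip",
    b"PK\x03\x04": "zip",
    b"\xfd7zXZ\x00": "xz",
    b"BZh": "bz2",
    b"\x28\xb5\x2f\xfd": "zstd",
    b"7z\xbc\xaf\x27\x1c": "7z",
}

def _demo_sniff_format(path):
    with open(path, "rb") as f:
        header = f.read(8)  # longest magic above is 6 bytes; 8 is a safe read
    for magic, fmt in _DEMO_MAGIC_NUMBERS.items():
        if header.startswith(magic):
            return fmt
    return None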
@pytest.fixture
def tar_file_with_dot_dot( tmp_path , text_file ):
    """simple docstring"""
    import tarfile
    directory = tmp_path / '''data_dot_dot'''
    directory.mkdir()
    path = directory / '''tar_file_with_dot_dot.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(text_file , arcname=os.path.join('''..''' , text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link( tmp_path ):
    """simple docstring"""
    import tarfile
    directory = tmp_path / '''data_sym_link'''
    directory.mkdir()
    path = directory / '''tar_file_with_sym_link.tar'''
    os.symlink('''..''' , directory / '''subdir''' , target_is_directory=True )
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file , error_log , tar_file_with_dot_dot , tar_file_with_sym_link , tmp_path , caplog ):
    """simple docstring"""
    insecure_tar_files = {
        '''tar_file_with_dot_dot''': tar_file_with_dot_dot,
        '''tar_file_with_sym_link''': tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    extracted_path = tmp_path / '''extracted'''
    TarExtractor.extract(path , extracted_path )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
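# Hedged sketch of the guard an extractor is expected to apply: resolve every tar
# member against the destination and refuse paths that escape it. This is an
# illustrative check, not TarExtractor's exact logic.
def _demo_is_safe_member(member_name, dest_dir):
    import os

    dest = os.path.realpath(dest_dir)
    target = os.path.realpath(os.path.join(dest, member_name))
    # os.path.commonpath raises on mixed drives (Windows); treat that as unsafe.
    try:
        return os.path.commonpath([dest, target]) == dest
    except ValueError:
        return False

# e.g. _demo_is_safe_member("../escape.txt", "/tmp/out") -> False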
def test_is_zipfile_false_positive( tmpdir ):
    """simple docstring"""
    not_a_zip_file = tmpdir / '''not_a_zip_file'''
    # From: https://github.com/python/cpython/pull/5053
    data = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) )  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file )  # but we're right
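# Hedged note: zipfile.is_zipfile only scans for an "end of central directory"
# record, so any blob containing the PK\x05\x06 marker (like the PNG bytes above)
# passes. A stricter sanity check — an assumption, not the library's logic — is to
# require the file to also *start* with a local file header:
def _demo_strict_zip_check(path):
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"

# Heuristic only: self-extracting archives with prepended data legitimately fail it.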
| 707 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
    def snake_case__ ( self : List[Any] , seed : List[str] , shape : Optional[Any] ) -> List[Any]:
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def snake_case__ ( self : Optional[Any] , seed : Tuple=0 , shape : List[Any]=(4, 4, 6_4, 6_4) , fpaa : Optional[Any]=False ) -> Tuple:
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def snake_case__ ( self : int , fpaa : Optional[Any]=False , model_id : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        revision = '''bf16''' if fpaa else None
        model , params = FlaxUNetaDConditionModel.from_pretrained(
            model_id , subfolder='''unet''' , dtype=dtype , revision=revision )
        return model, params
    def snake_case__ ( self : str , seed : int=0 , shape : Tuple=(4, 7_7, 7_6_8) , fpaa : Optional[int]=False ) -> Union[str, Any]:
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def snake_case__ ( self : Tuple , seed : Tuple , timestep : str , expected_slice : Optional[Any] ) -> Any:
        model , params = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=True )
        latents = self.get_latents(seed , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.intaa ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def snake_case__ ( self : Optional[Any] , seed : Optional[int] , timestep : Optional[Any] , expected_slice : Optional[Any] ) -> Union[str, Any]:
        model , params = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=True )
        latents = self.get_latents(seed , shape=(4, 4, 9_6, 9_6) , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 7_7, 1_0_2_4) , fpaa=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.intaa ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
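# Hedged sketch of the comparison pattern the tests above use: take a fixed slice of
# the (batch, h, w, c) output, flatten it, and compare in float32 against a
# reference. The arrays here are synthetic stand-ins, not real UNet outputs.
def _demo_slice_compare():
    import jax.numpy as jnp

    sample = jnp.ones((4, 64, 64, 4), dtype=jnp.bfloat16)
    output_slice = jnp.asarray(sample[-1, -2:, -2:, :2].flatten(), dtype=jnp.float32)  # 8 values
    expected_slice = jnp.ones(8, dtype=jnp.float32)
    assert jnp.allclose(output_slice, expected_slice, atol=1e-2)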
| 654 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 708 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def update_version_in_file( fname , version , pattern ):
    """simple docstring"""
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def update_version_in_examples( version ):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update( version , patch=False ):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version():
    """simple docstring"""
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
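# Hedged demo of the substitution machinery above on an in-memory string, so the
# behaviour is visible without touching any files. It reuses the "init" pattern
# already defined in REPLACE_PATTERNS; the version numbers are made up.
def _demo_version_bump():
    code = '__version__ = "4.30.0.dev0"\n'
    re_pattern, replace = REPLACE_PATTERNS["init"]
    new_code = re_pattern.sub(replace.replace("VERSION", "4.30.0"), code)
    assert new_code == '__version__ = "4.30.0"\n'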
| 654 | 0 |
'''simple docstring'''
from itertools import count
def solution( min_block_length : int = 5_0 ):
    """simple docstring"""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
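# Hedged cross-check (assumption: small parameters only). With min_block_length=3,
# the count for a row of length 7 is the classic value 17, so the recurrence above
# can be sanity-checked without running the full search.
def _demo_fill_count(min_block_length=3, row_length=7):
    counts = [1] * max(min_block_length, row_length + 1)
    for n in range(min_block_length, row_length + 1):
        counts[n] = 1  # the empty filling
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                counts[n] += counts[n - block_start - block_length - 1]
            counts[n] += 1  # block flush against the right edge
    return counts[row_length]

# _demo_fill_count() == 17 for the length-7, min-3 example.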
if __name__ == "__main__":
print(F"""{solution() = }""")
| 709 |
'''simple docstring'''
def counting_sort( collection ):
    """simple docstring"""
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string( string ):
    """simple docstring"""
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 0 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens ):
    """simple docstring"""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code ):
    """simple docstring"""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class A :
    def __init__( self : List[Any] , *, duplication_jaccard_threshold : float = 0.8_5 , ) -> Optional[int]:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def snake_case__ ( self : Dict , code_key : Tuple , min_hash : MinHash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def snake_case__ ( self : str ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def snake_case__ ( self : int , filepath : str ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , '''w''' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    """simple docstring"""
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator : Type[Dataset] ):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def make_duplicate_clusters( dataset : Type[Dataset] , jaccard_threshold : float ):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset ) ) , max_queue_size=1_0_0 ) ):
        di.add(filename , min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity( code_a : str , code_b : str ):
    """simple docstring"""
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    """simple docstring"""
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a['''base_index''']]['''content''']
        for element_b in extremes:
            code_b = _shared_dataset[element_b['''base_index''']]['''content''']
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['''base_index''']]['''copies''']
    print(f"""Original dataset size: {len(dataset )}""" )
    print(f"""Number of duplicate clusters: {len(duplicate_clusters )}""" )
    print(f"""Files in duplicate cluster: {len(duplicate_indices )}""" )
    print(f"""Unique files in duplicate cluster: {len(extreme_dict )}""" )
    print(f"""Filtered dataset size: {len(ds_filter )}""" )
    return ds_filter, duplicate_clusters
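# Hedged mini-demo of the MinHash machinery above on two near-duplicate snippets.
# The extra trailing words are only there to clear the MIN_NUM_TOKENS threshold;
# the loose reading of the result reflects that MinHash *estimates* Jaccard similarity.
def _demo_minhash_similarity():
    code_a = "def add ( left , right ) : return left + right  # padding tokens one two three four"
    code_b = "def add ( lhs , rhs ) : return lhs + rhs  # padding tokens one two three four"
    tokens_a = [t for t in NON_ALPHA.split(code_a) if t.strip()]
    tokens_b = [t for t in NON_ALPHA.split(code_b) if t.strip()]
    mh_a = get_min_hash(tokens_a)
    mh_b = get_min_hash(tokens_b)
    assert mh_a is not None and mh_b is not None  # both exceed MIN_NUM_TOKENS
    return mh_a.jaccard(mh_b)  # estimated Jaccard similarity of the token sets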
| 710 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ):
"""simple docstring"""
__UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' )
__UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
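# Hedged offline sketch of the same parsing pattern against static HTML, since the
# live Yahoo markup (and the class name above) changes without notice.
def _demo_parse_price():
    html = '<div class="My(6px) Pos(r) smartphone_Mt(6px)"><span>123.45</span></div>'
    soup = BeautifulSoup(html, "html.parser")
    return soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)").find("span").text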
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowerCAmelCase : Any = pytest.mark.integration
@require_faiss
class A ( UpperCAmelCase ):
def snake_case__ ( self : Dict ) -> Dict:
        __UpperCAmelCase = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(x ) for x in np.arange(3_0 ).tolist()]} )
return dset
def snake_case__ ( self : str ) -> int:
import faiss
__UpperCAmelCase = self._create_dummy_dataset()
        __UpperCAmelCase = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
__UpperCAmelCase = dset.add_faiss_index('''vecs''' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def snake_case__ ( self : Any ) -> List[Any]:
import faiss
__UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
import faiss
__UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def snake_case__ ( self : Dict ) -> List[str]:
__UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(__a , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
from elasticsearch import Elasticsearch
__UpperCAmelCase = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            mocked_index_create.return_value = {'''acknowledged''': True}
            mocked_bulk.return_value([(True, None)] * 3_0 )
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 2_9}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('''filename''' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class A ( UpperCAmelCase ):
def snake_case__ ( self : Union[str, Any] ) -> Dict:
import faiss
__UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
__UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase = 1
__UpperCAmelCase , __UpperCAmelCase = index.search(__a )
self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase = index.search_batch(__a )
self.assertRaises(__a , index.search_batch , queries[0] )
__UpperCAmelCase = [scores[0] for scores in total_scores]
__UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __a )
def snake_case__ ( self : int ) -> int:
import faiss
__UpperCAmelCase = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__a ):
__UpperCAmelCase = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def snake_case__ ( self : List[Any] ) -> str:
import faiss
__UpperCAmelCase = faiss.IndexFlat(5 )
__UpperCAmelCase = FaissIndex(custom_index=__a )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def snake_case__ ( self : int ) -> Any:
import faiss
__UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase = 1
__UpperCAmelCase , __UpperCAmelCase = index.search(__a )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
import faiss
__UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__UpperCAmelCase = '''index.faiss'''
__UpperCAmelCase = f"""mock://{index_name}"""
index.save(UpperCamelCase__ , storage_options=mockfs.storage_options )
__UpperCAmelCase = FaissIndex.load(UpperCamelCase__ , storage_options=mockfs.storage_options )
__UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase = 1
__UpperCAmelCase , __UpperCAmelCase = index.search(UpperCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A ( UpperCAmelCase ):
def snake_case__ ( self : Union[str, Any] ) -> str:
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
__UpperCAmelCase = Elasticsearch()
__UpperCAmelCase = {'''acknowledged''': True}
__UpperCAmelCase = ElasticSearchIndex(es_client=__a )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
__UpperCAmelCase = '''foo'''
__UpperCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
__UpperCAmelCase , __UpperCAmelCase = index.search(__a )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase = '''foo'''
__UpperCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
__UpperCAmelCase , __UpperCAmelCase = index.search(__a , request_timeout=3_0 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase = ['''foo''', '''bar''', '''foobar''']
__UpperCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
__UpperCAmelCase , __UpperCAmelCase = index.search_batch(__a )
__UpperCAmelCase = [scores[0] for scores in total_scores]
__UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
# batched queries with timeout
__UpperCAmelCase = ['''foo''', '''bar''', '''foobar''']
__UpperCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
__UpperCAmelCase , __UpperCAmelCase = index.search_batch(__a , request_timeout=3_0 )
__UpperCAmelCase = [scores[0] for scores in total_scores]
__UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
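# Hedged standalone sketch of the FAISS pattern the tests above exercise: build an
# inner-product index over one-hot vectors, then the best match for a one-hot query
# is the identical row with score 1.0. Requires the faiss package.
def _demo_faiss_inner_product():
    import faiss
    import numpy as np

    vectors = np.eye(5, dtype=np.float32)
    index = faiss.IndexFlatIP(5)  # 5-dimensional inner-product index
    index.add(vectors)
    scores, indices = index.search(vectors[1:2], 1)  # query with row 1, top-1
    assert indices[0][0] == 1 and scores[0][0] == 1.0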
| 711 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime( arrival_time : list[int] , burst_time : list[int] , no_of_processes : int ):
    """simple docstring"""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime( burst_time : list[int] , no_of_processes : int , waiting_time : list[int] ):
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
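# Hedged worked check for the test case below: with all arrivals at t=0 and bursts
# [2, 5, 3, 7], the scheduler runs P1 -> P3 -> P2 -> P4, so the expected results
# are waiting times [0, 5, 2, 10] and turnaround times [2, 10, 5, 17].
def _demo_srtf_check():
    assert calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4) == [0, 5, 2, 10]
    assert calculate_turnaroundtime([2, 5, 3, 7], 4, [0, 5, 2, 10]) == [2, 10, 5, 17]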
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = BioGptTokenizer
a_ = False
def snake_case__ ( self : Any ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def snake_case__ ( self : Dict , __a : Any ) -> List[Any]:
        input_text = '''lower newer'''
        output_text = '''lower newer'''
return input_text, output_text
def snake_case__ ( self : Any ) -> Tuple:
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
def snake_case__ ( self : List[Any] ) -> List[str]:
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
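# Hedged walkthrough of the vocabulary above: with merges "l o", "lo w" and
# "e r</w>", the word "lower" segments as ["low", "er</w>"], which is exactly what
# the unit test checks. A minimal re-implementation of that greedy merge loop (an
# illustration, not BioGptTokenizer's actual code):
def _demo_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # mark the word boundary
    for a, b in merges:  # apply merges in priority order
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols

# _demo_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) -> ["low", "er</w>"]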
| 712 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
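# Hedged sketch of the pattern behind these placeholder classes: a metaclass routes
# attribute access through a backend check, so importing a torch-dependent class
# without torch only fails loudly when the class is actually used. This is a
# simplified stand-in, not the real DummyObject implementation.
class _DemoDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the torch backend, which is not installed.")

class _DemoPipeline(metaclass=_DemoDummyMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError("_DemoPipeline requires the torch backend, which is not installed.")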
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
| 654 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
    def setUp( self ) -> None:
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
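        # ``##``-prefixed entries are WordPiece continuation pieces, so with this
        # toy vocab "unwanted" tokenizes to ["un", "##want", "##ed"].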
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts( self , tokenizer ) -> Tuple:
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
def snake_case__ ( self : Optional[Any] ) -> Dict:
        tokenizer = self.tokenizer_class(self.vocab_file )

        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def snake_case__ ( self : Tuple ) -> Any:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''UNwant\u00E9d,running'''

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )

        sequence = '''UNwant\u00E9d,running'''

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def snake_case__ ( self : List[Any] ) -> Any:
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def snake_case__ ( self : Tuple ) -> int:
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def snake_case__ ( self : Dict ) -> Optional[int]:
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def snake_case__ ( self : str ) -> int:
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def snake_case__ ( self : List[Any] ) -> Any:
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def snake_case__ ( self : Optional[Any] ) -> Dict:
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def snake_case__ ( self : List[str] ) -> str:
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']

        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
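    # For reference, WordPiece tokenization is a greedy longest-match-first scan
    # over each whitespace-split word; a minimal sketch (illustrative, not the
    # actual transformers implementation) would be:
    #
    #     def wordpiece(word, vocab, unk="[UNK]"):
    #         start, pieces = 0, []
    #         while start < len(word):
    #             for end in range(len(word), start, -1):
    #                 piece = ("##" if start > 0 else "") + word[start:end]
    #                 if piece in vocab:
    #                     pieces.append(piece)
    #                     start = end
    #                     break
    #             else:
    #                 return [unk]  # no sub-piece matched -> whole word is unknown
    #         return pieces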
def snake_case__ ( self : Optional[int] ) -> List[str]:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def snake_case__ ( self : str ) -> str:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def snake_case__ ( self : Tuple ) -> List[Any]:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def snake_case__ ( self : Any ) -> int:
        tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )

        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
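        # 1_0_1 and 1_0_2 are the [CLS] and [SEP] ids in the uncased BERT vocab,
        # so a pair encodes as [CLS] text [SEP] text_a [SEP].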
def snake_case__ ( self : Tuple ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def snake_case__ ( self : Union[str, Any] ) -> Any:
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )

                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 713 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class A ( DiffusionPipeline ):
    def __init__( self , speech_model : WhisperForConditionalGeneration , speech_processor : WhisperProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
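        # Computing attention in `slice_size` chunks trades a little speed for a
        # lower peak-memory footprint during the UNet forward pass.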
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self : Optional[int] , __a : int , __a : Union[str, Any]=1_6_0_0_0 , __a : int = 5_1_2 , __a : int = 5_1_2 , __a : int = 5_0 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : Optional[int] , ) -> Tuple:
__UpperCAmelCase = self.speech_processor.feature_extractor(
__a , return_tensors='''pt''' , sampling_rate=__a ).input_features.to(self.device )
__UpperCAmelCase = self.speech_model.generate(__a , max_length=4_8_0_0_0_0 )
__UpperCAmelCase = self.speech_processor.tokenizer.batch_decode(__a , skip_special_tokens=__a , normalize=__a )[
0
]
if isinstance(__a , __a ):
__UpperCAmelCase = 1
elif isinstance(__a , __a ):
__UpperCAmelCase = len(__a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__a )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__a )}.""" )
# get prompt text embeddings
__UpperCAmelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__UpperCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = text_embeddings.shape
__UpperCAmelCase = text_embeddings.repeat(1 , __a , 1 )
__UpperCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , __a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__UpperCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
if negative_prompt is None:
__UpperCAmelCase = [''''''] * batch_size
elif type(__a ) is not type(__a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !="""
f""" {type(__a )}.""" )
elif isinstance(__a , __a ):
__UpperCAmelCase = [negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
__UpperCAmelCase = negative_prompt
__UpperCAmelCase = text_input_ids.shape[-1]
__UpperCAmelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=__a , truncation=__a , return_tensors='''pt''' , )
__UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__UpperCAmelCase = uncond_embeddings.shape[1]
__UpperCAmelCase = uncond_embeddings.repeat(1 , __a , 1 )
__UpperCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , __a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__UpperCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__UpperCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__UpperCAmelCase = torch.randn(__a , generator=__a , device='''cpu''' , dtype=__a ).to(
self.device )
else:
__UpperCAmelCase = torch.randn(__a , generator=__a , device=self.device , dtype=__a )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__UpperCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__UpperCAmelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__UpperCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__UpperCAmelCase = {}
if accepts_eta:
__UpperCAmelCase = eta
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__UpperCAmelCase = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
            noise_pred = self.unet(__a , __a , encoder_hidden_states=__a ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
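                # classifier-free guidance: eps = eps_uncond + s * (eps_text - eps_uncond),
                # i.e. extrapolate away from the unconditional prediction.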
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a , __a , __a )
__UpperCAmelCase = 1 / 0.1_8_2_1_5 * latents
__UpperCAmelCase = self.vae.decode(__a ).sample
__UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__UpperCAmelCase = self.numpy_to_pil(__a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a )
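# Hypothetical usage sketch (checkpoint id and audio loading are illustrative
# assumptions, not a documented entry point):
#
#     pipe = A.from_pretrained("some/speech-to-image-checkpoint")
#     out = pipe(audio_array, sampling_rate=16_000, num_inference_steps=50)
#     out.images[0].save("generated.png")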
| 714 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
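# `_LazyModule` (wired up at the bottom of this file) resolves the names in
# `_import_structure` on first attribute access, so importing this package
# stays cheap until a torch- or tokenizers-backed symbol is actually used.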
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCAmelCase ( ):
"""simple docstring"""
    parser = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
        '''training_script''' , type=str , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
return parser.parse_args()
def lowerCAmelCase ( ):
"""simple docstring"""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]

    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
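# Typical invocation (script and argument names illustrative):
#
#     python xla_spawn.py --num_cores 8 train_script.py --learning_rate 3e-5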
| 715 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
"""simple docstring"""
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
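    # The generators above stream rows lazily, so `tf.data.Dataset.from_generator`
    # below never materializes a full split in memory.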
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    label_column_id: int = field(metadata={'''help''': '''Which column contains the label'''} )
    train_file: str = field(default=None , metadata={'''help''': '''The path of the training file'''} )
    dev_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the development file'''} )
    test_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the test file'''} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )

    train_dataset , eval_dataset , test_ds , label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )

        return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )

        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )

            for key, value in result.items():
                logger.info(f""" {key} = {value}""" )
                writer.write(f"""{key} = {value}\n""" )

            results.update(result )
return results
if __name__ == "__main__":
main()
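# Hypothetical invocation (file paths illustrative):
#
#     python run_tf_text_classification.py \
#         --model_name_or_path bert-base-uncased --label_column_id 0 \
#         --train_file train.csv --dev_file dev.csv \
#         --output_dir ./out --do_train --do_eval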
| 654 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester :
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=6_4 , embedding_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act='''gelu''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_megatron_bert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_megatron_bert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_megatron_bert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_megatron_bert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_megatron_bert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_megatron_bert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
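        # Inputs are tiled from (batch, seq_len) to (batch, num_choices, seq_len):
        # every candidate choice sees its own copy of the shared context.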
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
# test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : List[str] ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
def snake_case__ ( self : Any ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
def snake_case__ ( self : str ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
def snake_case__ ( self : Optional[int] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
def snake_case__ ( self : Tuple ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
def snake_case__ ( self : Any ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
def snake_case__ ( self : Dict ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor(UpperCamelCase__ : Optional[Any] ):
    """simple docstring"""
    return torch.tensor(
        UpperCamelCase__ , dtype=torch.long , device=torch_device , )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
__UpperCAmelCase = os.path.join(os.environ['''MYDIR'''] , __a )
__UpperCAmelCase = MegatronBertModel.from_pretrained(__a )
model.to(__a )
model.half()
__UpperCAmelCase = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = torch.Size((1, 9, 1_0_2_4) )
self.assertEqual(output.shape , __a )
__UpperCAmelCase = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
__UpperCAmelCase = output[0, ii, jj]
__UpperCAmelCase = expected[3 * ii + jj]
__UpperCAmelCase = '''ii={} jj={} a={} b={}'''.format(__a , __a , __a , __a )
self.assertTrue(math.isclose(__a , __a , rel_tol=__a , abs_tol=__a ) , msg=__a )
| 716 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 654 | 0 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
__UpperCAmelCase = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
__UpperCAmelCase = 1
if upper_limit > 0:
__UpperCAmelCase = 1
# Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j in 0 .. i-1
for i in range(2 , upper_limit + 1 ):
for j in range(UpperCamelCase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
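# Worked example (added; `catalan_numbers` is the name used in the __main__ block below):
# catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]; e.g. C(3) = C0*C2 + C1*C1 + C2*C0 = 2 + 1 + 2 = 5.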
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
__lowerCAmelCase : int = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
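# Usage note (added as a sketch): with _LazyModule, `from transformers import ASTConfig`
# stays cheap at import time; the torch- and speech-backed submodules above are only
# imported when one of their listed attributes is first accessed.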
| 654 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class A ( unittest.TestCase ):
def __init__( self : int , __a : List[str] , __a : Optional[int]=7 , __a : List[str]=3 , __a : Dict=1_8 , __a : Optional[int]=3_0 , __a : Optional[Any]=4_0_0 , __a : List[str]=True , __a : Union[str, Any]=None , __a : List[Any]=True , __a : Optional[int]=None , __a : int=True , ) -> Tuple:
__UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_0}
__UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = image_size
__UpperCAmelCase = min_resolution
__UpperCAmelCase = max_resolution
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = do_center_crop
__UpperCAmelCase = crop_size
__UpperCAmelCase = do_flip_channel_order
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = MobileViTImageProcessor if is_vision_available() else None
def snake_case__ ( self : List[str] ) -> int:
__UpperCAmelCase = MobileViTImageProcessingTester(self )
@property
def snake_case__ ( self : Dict ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , '''do_resize''' ) )
self.assertTrue(hasattr(__a , '''size''' ) )
self.assertTrue(hasattr(__a , '''do_center_crop''' ) )
self.assertTrue(hasattr(__a , '''center_crop''' ) )
self.assertTrue(hasattr(__a , '''do_flip_channel_order''' ) )
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 2_0} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def snake_case__ ( self : Dict ) -> List[str]:
pass
def snake_case__ ( self : List[Any] ) -> int:
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(__a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Optional[int] ) -> List[str]:
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(__a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(__a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
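# Minimal usage sketch (added; assumes the class above is exposed as BertGenerationConfig):
# config = BertGenerationConfig()  # defaults taken from the signature above
# assert (config.vocab_size, config.num_attention_heads) == (50358, 16)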
| 654 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class A ( unittest.TestCase ):
a_ = JukeboxTokenizer
a_ = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def snake_case__ ( self : Optional[int] ) -> Any:
import torch
__UpperCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
__UpperCAmelCase = tokenizer(**self.metas )['''input_ids''']
# fmt: off
__UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def snake_case__ ( self : List[str] ) -> Optional[Any]:
import torch
__UpperCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
__UpperCAmelCase = tokenizer(**self.metas )['''input_ids''']
# fmt: off
__UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 719 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458
# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = symbols("ct x y z")
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Speeds below 1 m/s are rejected; this module targets relativistic speeds of order c
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
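# Worked example (added): for v = 0.8c, beta = 0.8 and gamma = 1 / sqrt(1 - 0.8**2) = 5/3,
# so the boost matrix's upper-left 2x2 block is [[5/3, -4/3], [-4/3, 5/3]].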
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
"""simple docstring"""
# Default to the symbolic four-vector when no event is supplied
if event is None:
__UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowerCAmelCase : Dict = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
__lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 0 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
a_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , __a : Union[str, Any]="</s>" , __a : Dict="<unk>" , __a : Any="<pad>" , __a : str=1_2_5 , __a : int=None , **__a : Any , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__UpperCAmelCase = [f"""<extra_id_{i}>""" for i in range(__a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__UpperCAmelCase = len(set(filter(lambda __a : bool('''extra_id''' in str(__a ) ) , __a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'''
''' extra_ids tokens''' )
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
super().__init__(
eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
__UpperCAmelCase = extra_ids
__UpperCAmelCase = 2**8 # UTF-8 is byte-oriented: 2**8 = 256 possible byte values
# define special tokens dict
__UpperCAmelCase = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__UpperCAmelCase = len(self.special_tokens_encoder )
__UpperCAmelCase = len(__a )
for i, token in enumerate(__a ):
__UpperCAmelCase = self.vocab_size + i - n
__UpperCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def snake_case__ ( self : Optional[int] ) -> Tuple:
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def snake_case__ ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__a )) + [1]
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
def snake_case__ ( self : Dict , __a : List[int] ) -> List[int]:
if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case__ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def snake_case__ ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase = self._add_eos_if_not_present(__a )
if token_ids_a is None:
return token_ids_a
else:
__UpperCAmelCase = self._add_eos_if_not_present(__a )
return token_ids_a + token_ids_a
def snake_case__ ( self : Tuple , __a : str ) -> List[str]:
__UpperCAmelCase = [chr(__a ) for i in text.encode('''utf-8''' )]
return tokens
def snake_case__ ( self : Tuple , __a : str ) -> Optional[Any]:
if token in self.special_tokens_encoder:
__UpperCAmelCase = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__UpperCAmelCase = self.added_tokens_encoder[token]
elif len(__a ) != 1:
__UpperCAmelCase = self.unk_token_id
else:
__UpperCAmelCase = ord(__a ) + self._num_special_tokens
return token_id
def snake_case__ ( self : str , __a : List[str] ) -> List[Any]:
if index in self.special_tokens_decoder:
__UpperCAmelCase = self.special_tokens_decoder[index]
else:
__UpperCAmelCase = chr(index - self._num_special_tokens )
return token
def snake_case__ ( self : Tuple , __a : Dict ) -> Tuple:
__UpperCAmelCase = b''''''
for token in tokens:
if token in self.special_tokens_decoder:
__UpperCAmelCase = self.special_tokens_decoder[token].encode('''utf-8''' )
elif token in self.added_tokens_decoder:
__UpperCAmelCase = self.special_tokens_decoder[token].encode('''utf-8''' )
elif token in self.special_tokens_encoder:
__UpperCAmelCase = token.encode('''utf-8''' )
elif token in self.added_tokens_encoder:
__UpperCAmelCase = token.encode('''utf-8''' )
else:
__UpperCAmelCase = bytes([ord(__a )] )
bstring += tok_string
__UpperCAmelCase = bstring.decode('''utf-8''' , errors='''ignore''' )
return string
def snake_case__ ( self : List[Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
return ()
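# Behaviour sketch (added; assumes the class above is exposed as ByT5Tokenizer):
# tokenization is byte-level with a 3-token offset for the fixed specials
# (pad=0, eos=1, unk=2), so "hi" encodes to [ord("h") + 3, ord("i") + 3] == [107, 108]
# before build_inputs_with_special_tokens appends the eos id 1.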
| 720 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
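# Note (added): `put` handles priority updates by popping heap entries until it finds
# the item, then pushing everything back -- roughly O(k log n) per update, which is
# cheap on the 20x20 grid this script searches.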
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
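# Worked example (added; assumes heuristics[2] is the manhattan heuristic and W1 = 1,
# as configured below): key((0, 0), 2, (19, 19), {(0, 0): 0}) = 0 + 1 * (19 + 19) = 38.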
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((__UpperCAmelCase) , (__UpperCAmelCase)) = s
__UpperCAmelCase = (x - 1, y)
__UpperCAmelCase = (x + 1, y)
__UpperCAmelCase = (x, y + 1)
__UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 0 |
'''simple docstring'''
__lowerCAmelCase : List[str] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def lowerCAmelCase ( UpperCamelCase__ : dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase = set()
# keep track of all the paths to be checked
__UpperCAmelCase = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__UpperCAmelCase = queue.pop(0 )
# get the last node from the path
__UpperCAmelCase = path[-1]
if node not in explored:
__UpperCAmelCase = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__UpperCAmelCase = list(UpperCamelCase__ )
new_path.append(UpperCamelCase__ )
queue.append(UpperCamelCase__ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(UpperCamelCase__ )
# in case there's no path between the 2 nodes
return []
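# Alternative sketch (added, not part of the original module): the function above queues
# whole paths, so memory scales with frontier size times path length. A parent-pointer
# variant reconstructs the path once at the end:
def bfs_shortest_path_with_parents(graph: dict, start, goal):
    """Return one shortest path from start to goal, or [] if goal is unreachable."""
    parents = {start: None}  # doubles as the visited set
    frontier = [start]
    while frontier:
        node = frontier.pop(0)
        if node == goal:
            path = []
            while node is not None:  # walk the parent pointers back to start
                path.append(node)
                node = parents[node]
            return path[::-1]
        for neighbour in graph[node]:
            if neighbour not in parents:
                parents[neighbour] = node
                frontier.append(neighbour)
    return []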
def lowerCAmelCase ( UpperCamelCase__ : dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__UpperCAmelCase = [start]
__UpperCAmelCase = set(UpperCamelCase__ )
# Keep tab on distances from `start` node.
__UpperCAmelCase = {start: 0, target: -1}
while queue:
__UpperCAmelCase = queue.pop(0 )
if node == target:
__UpperCAmelCase = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(UpperCamelCase__ )
queue.append(UpperCamelCase__ )
__UpperCAmelCase = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 721 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
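# Example (added): camel_case_split("TFDistilBertModel") -> ['TF', 'Distil', 'Bert', 'Model'];
# the lookup loop below uses this to trim trailing words from class names.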
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to pick the default processing class for each model type.
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
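# Illustrative result (added as a sketch; the obfuscated assignments above stand for the
# pytorch/tensorflow/flax/processor columns of the original script): one row per model
# type, e.g. model_type="bert", pytorch=True, tensorflow=True, flax=True,
# processor="AutoTokenizer".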
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
# Sort the model classes so that nondeterministic ordering does not create spurious update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
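# Hedged usage sketch (added; not part of the original script). The table this
# updater maintains maps each concrete model class to a (pipeline tag, auto class)
# pair, roughly like the illustrative entries below; the script itself is run as
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only
_example_rows = {
    "BertForSequenceClassification": ("text-classification", "AutoModelForSequenceClassification"),
    "TFBertForSequenceClassification": ("text-classification", "TF_AutoModelForSequenceClassification"),
}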
| 654 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge :
    def __init__( self : List[Any] , short_edge_length : int , max_size : Optional[Any]=sys.maxsize ) -> Union[str, Any]:
        self.interp_method = '''bilinear'''
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self : List[Any] , imgs : Optional[Any] ) -> int:
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
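# Illustrative standalone sketch (added): the scaling rule above keeps the aspect
# ratio, pinning the short edge to `size` and capping the long edge at `max_size`.
def _resize_shapes(h: int, w: int, size: int, max_size: int) -> tuple:
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)
# e.g. _resize_shapes(480, 640, size=800, max_size=1333) -> (800, 1067)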
class Preprocess :
    def __init__( self : Union[str, Any] , cfg : Any ) -> Optional[int]:
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x : (x - self.pixel_mean) / self.pixel_std
    def pad( self : Optional[Any] , images : Optional[Any] ) -> str:
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__( self : Tuple , images : Optional[int] , single_image : Union[str, Any]=False ) -> Union[str, Any]:
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes : Optional[Any] , scale_yx : Any ):
    """Rescale (x1, y1, x2, y2) boxes by per-image (scale_y, scale_x) factors."""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor : Union[str, Any] , box_size : Tuple[int, int] ):
    """Clamp box coordinates in place so they lie inside an (h, w) image."""
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
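# Hedged example (added): how _scale_box behaves. scales_yx holds per-image
# (scale_y, scale_x); boxes are (x1, y1, x2, y2).
if __name__ == "__main__":
    _boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
    _scales = torch.tensor([[0.5, 2.0]])
    _scale_box(_boxes , _scales )
    # _boxes is now [[20., 10., 220., 110.]]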
| 700 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
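# Hedged note (added): the directory assertions in the checkpointing tests above
# rely on `Accelerator.save_state` in the example scripts writing folders named
# `epoch_{n}` / `step_{n}` under --output_dir; resuming from one of them should
# skip the finished epochs, which is what the "epoch 0:" / "epoch 1:" stdout
# checks verify.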
| 654 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
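# Hedged migration sketch (added; model id is illustrative):
#   from diffusers import StableDiffusionInpaintPipeline
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")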
| 701 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main():
    """Read images and YOLO annotations, flip them, and save the results to OUTPUT_DIR."""
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    print('''Processing...''' )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(3_2 )
        file_name = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cv2.imwrite(f"""{file_root}.jpg""" , image , [cv2.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f"""Success {index+1}/{len(new_images )} with {file_name}""" )
        annos_list = []
        for anno in new_annos[index]:
            obj = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(obj )
        with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset( label_dir : str , img_dir : str ):
    """Pair every YOLO .txt label file in `label_dir` with its image path in `img_dir`."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( img_list : list , anno_list : list , flip_type : int = 1 ):
    """Flip each image and its normalized boxes horizontally (flip_type=1) or vertically (flip_type=0)."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
        if flip_type == 1:
            img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(img )
    return new_imgs_list, new_annos_lists, path_list
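# Worked example (added for clarity): YOLO labels store normalized centers, so a
# flip only mirrors one coordinate.
#   bbox = [class_id, x_center, y_center, width, height]
#   [0, 0.30, 0.40, 0.10, 0.20] --horizontal (flip_type=1)--> [0, 0.70, 0.40, 0.10, 0.20]
#   [0, 0.30, 0.40, 0.10, 0.20] --vertical   (flip_type=0)--> [0, 0.30, 0.60, 0.10, 0.20]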
def random_chars( number_char : int = 3_2 ):
    """Generate a random string of `number_char` lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ['''input_features''']
    def __init__( self : List[str] , feature_size : Union[str, Any]=8_0 , sampling_rate : List[Any]=1_6_0_0_0 , hop_length : Any=1_6_0 , chunk_length : Any=3_0 , n_fft : Union[str, Any]=4_0_0 , padding_value : Tuple=0.0 , return_attention_mask : Tuple=False , **kwargs : Any , ) -> Union[str, Any]:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
    def _np_extract_fbank_features( self : Optional[int] , waveform : np.array ) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values : List[np.ndarray] , attention_mask : List[np.ndarray] , padding_value : float = 0.0 ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def __call__( self : Tuple , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation : bool = True , pad_to_multiple_of : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , padding : Optional[str] = "max_length" , max_length : Optional[int] = None , sampling_rate : Optional[int] = None , do_normalize : Optional[bool] = None , **kwargs : Optional[Any] , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batched_speech = BatchFeature({'''input_features''': raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['''input_features'''] = self.zero_mean_unit_var_norm(
                padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
            padded_inputs['''input_features'''] = np.stack(padded_inputs['''input_features'''] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , List ):
            padded_inputs['''input_features'''] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs['''input_features'''] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['''attention_mask'''] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self : List[str] ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
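# Standalone sketch (added; mirrors the normalization inside
# _np_extract_fbank_features above): Whisper-style dynamic-range compression
# clamps the log spectrogram to 8 dB below its max, then maps it into ~[-1, 1].
def _whisper_norm(log_spec: np.ndarray) -> np.ndarray:
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
    return (log_spec + 4.0) / 4.0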
| 702 |
'''simple docstring'''
from pathlib import Path
import fire
def minify( src_dir : str , dest_dir : str , n : int ):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
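# Hedged usage sketch (added; paths are placeholders). fire maps positional CLI
# arguments onto minify(src_dir, dest_dir, n):
#   python minify.py test_data/full test_data/mini 5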
| 654 | 0 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
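# Hedged follow-up sketch (added): the checkpoint saved above holds a randomly
# initialized GPT-2-style model; it can be reloaded later for training with
#   model = AutoModelForCausalLM.from_pretrained(args.model_name)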
| 703 |
'''simple docstring'''
def lowerCAmelCase ( number : int ):
    """Return the `number`-th Catalan number (1, 1, 2, 5, 14, 42, ...)."""
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
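# Worked example (added): at loop index i the running value goes from C(i) to
# C(i + 1) = C(i) * (4 * i - 2) // (i + 1), the standard Catalan recurrence, so
#   lowerCAmelCase(5) -> 14    and    lowerCAmelCase(6) -> 42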
| 654 | 0 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy( preds : Optional[Any] , labels : Optional[Any] ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , '''sklearn''' )
    return (preds == labels).mean()
def acc_and_f1( preds : Dict , labels : List[str] ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , '''sklearn''' )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman( preds : List[Any] , labels : Any ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , '''sklearn''' )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics( task_name : str , preds : Optional[int] , labels : Optional[Any] ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , '''sklearn''' )
    assert len(preds ) == len(labels ), f"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics( task_name : List[str] , preds : str , labels : Any ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , '''sklearn''' )
    if len(preds ) != len(labels ):
        raise ValueError(f"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}""" )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
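# Hedged usage example (added; tiny arrays for illustration):
#   import numpy as np
#   preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}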
| 704 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest( nn.Module ):
    # class name restored from its use in the CUDA test below
    def __init__( self : Optional[Any] ) -> int:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self : List[str] , x : Optional[int] ) -> Optional[int]:
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs , arga = mock_training_loop_function('''hello''' )
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
        self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
        with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
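# Hedged standalone sketch (added): the decorator pattern exercised above, in a
# real training entry point. Argument names are illustrative.
#   from accelerate.utils.memory import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=1_2_8)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop
#
#   train()  # halves batch_size on every CUDA OOM until the loop succeeds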
| 654 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowerCAmelCase ( inductance : float , capacitance : float ):
    """Return the resonant frequency f = 1 / (2 * pi * sqrt(L * C)) of an LC circuit."""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
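# Worked example (added): for L = 10 mH and C = 100 nF,
#   f = 1 / (2 * pi * sqrt(1e-9)) ≈ 5032.9 Hz
#   lowerCAmelCase(0.01 , 1e-7) -> ("Resonant frequency", 5032.92...)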
| 705 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal( u : float , p : int ):
    """Return u * (u - 1) * ... * (u - p + 1), the coefficient product in Newton's forward-difference formula."""
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main():
    """Read sample points interactively and interpolate with Newton's forward differences."""
    n = int(input('''enter the numbers of values: ''' ) )
    y = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
    count = 0
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
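# Reference formula (added for clarity): Newton's forward-difference interpolation
#   P(x) = y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + u(u-1)(u-2)/3! * Δ³y0 + ...
# with u = (x - x0) / h; ucal(u, i) supplies the u(u-1)...(u-i+1) factor and
# math.factorial(i) the divisor.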
| 654 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
__lowerCAmelCase : str = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
__lowerCAmelCase : Optional[Any] = {
"camembert-base": 512,
}
__lowerCAmelCase : str = "▁"
class CamembertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self : Union[str, Any] , vocab_file : int , bos_token : Union[str, Any]="<s>" , eos_token : Tuple="</s>" , sep_token : Optional[int]="</s>" , cls_token : Dict="<s>" , unk_token : Tuple="<unk>" , pad_token : List[str]="<pad>" , mask_token : List[str]="<mask>" , additional_special_tokens : List[Any]=["<s>NOTUSED", "</s>NOTUSED"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Optional[int] , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids )
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def snake_case__ ( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def snake_case__ ( self : List[str] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def snake_case__ ( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def snake_case__ ( self : Dict ) -> str:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
    def snake_case__ ( self : int ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def snake_case__ ( self : Union[str, Any] , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def snake_case__ ( self : int , token : Union[str, Any] ) -> Union[str, Any]:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )
    def snake_case__ ( self : Optional[Any] , index : List[Any] ) -> Any:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def snake_case__ ( self : Dict , tokens : Union[str, Any] ) -> Union[str, Any]:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self : Optional[Any] ) -> Tuple:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self : List[Any] , d : Optional[int] ) -> Tuple:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def snake_case__ ( self : Any , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
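# Hedged usage sketch (added): with a real sentencepiece.bpe.model this tokenizer
# behaves like the upstream CamembertTokenizer, e.g.
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tokenizer("J'aime le camembert !")["input_ids"]
#   # ids starts with <s> (CLS) and ends with </s> (SEP), per the
#   # special-token methods above.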
| 706 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def create_rename_keys( encoder_config : Optional[Any] , decoder_config : Union[str, Any] ):
    """Build the list of (old key, new key) pairs to rename in the original state dict."""
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict : Optional[Any] , encoder_config : List[Any] ):
    """Split each fused qkv projection into separate query/key/value weights."""
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key( dct : Any , old : Optional[Any] , new : List[str] ):
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( checkpoint_url : Dict ):
    """Download an example image matching the checkpoint domain (handwritten vs printed)."""
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint( checkpoint_url : Dict , pytorch_dump_folder_path : Union[str, Any] ):
    """Copy/paste/tweak the original TrOCR weights into a VisionEncoderDecoderModel."""
    encoder_config = ViTConfig(image_size=3_8_4 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_6_8
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_0_2_4
        encoder_config.intermediate_size = 4_0_9_6
        encoder_config.num_hidden_layers = 2_4
        encoder_config.num_attention_heads = 1_6
        decoder_config.encoder_hidden_size = 1_0_2_4
    else:
        raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = '''relu'''
        decoder_config.max_position_embeddings = 1_0_2_4
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , check_hash=True )['''model''']
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''decoder''' ) and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''' )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors='''pt''' ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_0_2_6_5] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :1_0] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving processor to {pytorch_dump_folder_path}""" )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
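# Hedged invocation sketch (added; script file name and output path assumed):
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten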
| 654 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args( unknown_args : Optional[int] ):
    """Parse leftover CLI tokens into a kwargs dict (--key value pairs)."""
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main():
    """Entry point for the `datasets-cli` command-line tool."""
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
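# Hedged usage sketch (added): once installed, this entry point is exposed as
# `datasets-cli`, e.g.
#   datasets-cli env                 # EnvironmentCommand
#   datasets-cli test ./my_dataset   # TestCommand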
| 707 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
    def get_file_format( self : List[Any] , seed : List[str] , shape : Optional[Any] ) -> List[Any]:
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def get_latents( self : Optional[Any] , seed : Tuple=0 , shape : List[Any]=(4, 4, 6_4, 6_4) , fp16 : Optional[Any]=False ) -> Tuple:
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self : int , fp16 : Optional[Any]=False , model_id : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = '''bf16''' if fp16 else None
        model , params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder='''unet''' , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self : str , seed : int=0 , shape : Tuple=(4, 7_7, 7_6_8) , fp16 : Optional[int]=False ) -> Union[str, Any]:
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def snake_case__ ( self : Tuple , seed : Tuple , timestep : str , expected_slice : Optional[Any] ) -> Any:
        model , params = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fp16=True )
        latents = self.get_latents(seed , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fp16=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def snake_case__ ( self : Optional[Any] , seed : Optional[int] , timestep : Optional[Any] , expected_slice : Optional[Any] ) -> Union[str, Any]:
        model , params = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fp16=True )
        latents = self.get_latents(seed , shape=(4, 4, 9_6, 9_6) , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 7_7, 1_0_2_4) , fp16=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
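# Note (added): each parameterized case above is (seed, timestep, expected slice);
# the compared slice is sample[-1, -2:, -2:, :2].flatten() cast to float32, with
# atol=1e-2 absorbing the bfloat16-vs-float16 gap mentioned in the comments.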
| 654 | 0 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset( dataset : Any , expected_features : Dict ):
    """Shared assertions for datasets read back from SQL."""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_sql_dataset(UpperCamelCase__ , UpperCamelCase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_sql_dataset(UpperCamelCase__ , UpperCamelCase__ )
def iter_sql_file( sqlite_path : List[str] ):
    """Yield all rows of the `dataset` table in the given SQLite file."""
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute('''SELECT * FROM dataset''' )
        for row in cur:
            yield row
@require_sqlalchemy
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = os.path.join(UpperCamelCase__ , '''tmp.sql''' )
__UpperCAmelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=UpperCamelCase__ ).read()
SqlDatasetWriter(UpperCamelCase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
__UpperCAmelCase = iter_sql_file(UpperCamelCase__ )
__UpperCAmelCase = iter_sql_file(UpperCamelCase__ )
for rowa, rowa in zip(UpperCamelCase__ , UpperCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Any ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = os.path.join(UpperCamelCase__ , '''tmp.sql''' )
__UpperCAmelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=UpperCamelCase__ ).read()
SqlDatasetWriter(UpperCamelCase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
__UpperCAmelCase = iter_sql_file(UpperCamelCase__ )
__UpperCAmelCase = iter_sql_file(UpperCamelCase__ )
for rowa, rowa in zip(UpperCamelCase__ , UpperCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = os.path.join(UpperCamelCase__ , '''tmp.sql''' )
__UpperCAmelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=UpperCamelCase__ ).read()
with pytest.raises(UpperCamelCase__ ):
SqlDatasetWriter(UpperCamelCase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
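# Hedged standalone sketch (added): the round trip these tests cover, outside
# pytest. Database path is illustrative.
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#   ds.to_sql("dataset", "sqlite:///out.db")               # SqlDatasetWriter under the hood
#   ds2 = Dataset.from_sql("dataset", "sqlite:///out.db")  # SqlDatasetReader under the hood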
| 708 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
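# For example, with pattern="init" and version="4.27.0", a file line such as
#     __version__ = "4.27.0.dev0"
# first has the template's literal "VERSION" placeholder filled in, and the
# compiled MULTILINE regex then rewrites the line to
#     __version__ = "4.27.0"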
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 654 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = list(UpperCamelCase__ )
__UpperCAmelCase = list(UpperCamelCase__ )
__UpperCAmelCase = 0
for i in range(len(UpperCamelCase__ ) ):
if lista[i] != listb[i]:
count += 1
__UpperCAmelCase = '''_'''
if count > 1:
return False
else:
return "".join(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : list[str] ):
"""simple docstring"""
__UpperCAmelCase = []
while True:
__UpperCAmelCase = ['''$'''] * len(UpperCamelCase__ )
__UpperCAmelCase = []
for i in range(len(UpperCamelCase__ ) ):
for j in range(i + 1 , len(UpperCamelCase__ ) ):
__UpperCAmelCase = compare_string(binary[i] , binary[j] )
if k is False:
__UpperCAmelCase = '''*'''
__UpperCAmelCase = '''*'''
temp.append('''X''' )
for i in range(len(UpperCamelCase__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(UpperCamelCase__ ) == 0:
return pi
__UpperCAmelCase = list(set(UpperCamelCase__ ) )
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Sequence[float] ):
"""simple docstring"""
__UpperCAmelCase = []
for minterm in minterms:
__UpperCAmelCase = ''''''
for _ in range(UpperCamelCase__ ):
__UpperCAmelCase = str(minterm % 2 ) + string
minterm //= 2
temp.append(UpperCamelCase__ )
return temp
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = list(UpperCamelCase__ )
__UpperCAmelCase = list(UpperCamelCase__ )
__UpperCAmelCase = 0
for i in range(len(UpperCamelCase__ ) ):
if lista[i] != listb[i]:
count_n += 1
return count_n == count
def lowerCAmelCase ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[str] ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = [0] * len(UpperCamelCase__ )
for i in range(len(chart[0] ) ):
__UpperCAmelCase = 0
__UpperCAmelCase = -1
for j in range(len(UpperCamelCase__ ) ):
if chart[j][i] == 1:
count += 1
__UpperCAmelCase = j
if count == 1:
__UpperCAmelCase = 1
for i in range(len(UpperCamelCase__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = 0
temp.append(prime_implicants[i] )
while True:
__UpperCAmelCase = 0
__UpperCAmelCase = -1
__UpperCAmelCase = 0
for i in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = chart[i].count(1 )
if count_n > max_n:
__UpperCAmelCase = count_n
__UpperCAmelCase = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = 0
def lowerCAmelCase ( UpperCamelCase__ : list[str] , UpperCamelCase__ : list[str] ):
"""simple docstring"""
__UpperCAmelCase = [[0 for x in range(len(UpperCamelCase__ ) )] for x in range(len(UpperCamelCase__ ) )]
for i in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = prime_implicants[i].count('''_''' )
for j in range(len(UpperCamelCase__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , UpperCamelCase__ ):
__UpperCAmelCase = 1
return chart
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''Enter the no. of variables\n''' ) )
__UpperCAmelCase = [
float(x )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
__UpperCAmelCase = decimal_to_binary(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = check(UpperCamelCase__ )
print('''Prime Implicants are:''' )
print(UpperCamelCase__ )
__UpperCAmelCase = prime_implicant_chart(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = selection(UpperCamelCase__ , UpperCamelCase__ )
print('''Essential Prime Implicants are:''' )
print(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 709 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
# if the collection is empty, return an empty list
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase = len(UpperCamelCase__ )
__UpperCAmelCase = max(UpperCamelCase__ )
__UpperCAmelCase = min(UpperCamelCase__ )
# create the counting array
__UpperCAmelCase = coll_max + 1 - coll_min
__UpperCAmelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. now, counting_arr[i] tells
# us how many elements <= i are in the collection
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort), iterating from end to beginning and updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
__UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
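# Worked example: for collection [4, 1, 3, 1], coll_min is 1 and counting_arr is
# first [2, 0, 1, 1] (two 1s, one 3, one 4); the prefix-sum pass turns it into
# [2, 2, 3, 4], and the reversed placement pass then emits the stable result
# [1, 1, 3, 4].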
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 0 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__lowerCAmelCase : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__lowerCAmelCase : Union[str, Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print("\n".join(upper_files) + "\n")
__lowerCAmelCase : str = [file for file in filepaths if " " in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print("\n".join(space_files) + "\n")
__lowerCAmelCase : Optional[Any] = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print("\n".join(hyphen_files) + "\n")
__lowerCAmelCase : Tuple = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print("\n".join(nodir_files) + "\n")
__lowerCAmelCase : str = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
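# A self-contained sketch of the same checks against a hardcoded path list (the
# sample paths are hypothetical; the real script collects them via good_file_paths()):
if __name__ == "__main__":
    sample_paths = ["maths/gcd.py", "Maths/LCM.py", "graph search.py", "top_level.py"]
    assert [p for p in sample_paths if p != p.lower()] == ["Maths/LCM.py"]
    assert [p for p in sample_paths if " " in p] == ["graph search.py"]
    # os.sep is assumed to be "/" here, i.e. a POSIX checkout
    assert [p for p in sample_paths if os.sep not in p] == ["graph search.py", "top_level.py"]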
| 710 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ):
"""simple docstring"""
__UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' )
__UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
__lowerCAmelCase : str = 5
__lowerCAmelCase : Union[str, Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = SpeechaTextTokenizer
a_ = False
a_ = True
def snake_case__ ( self : Tuple ) -> List[Any]:
super().setUp()
__UpperCAmelCase = sp.SentencePieceProcessor()
spm_model.Load(__a )
__UpperCAmelCase = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__a ) )]
__UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__UpperCAmelCase = Path(self.tmpdirname )
save_json(__a , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__a , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
__UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : List[str] ) -> Optional[int]:
__UpperCAmelCase = '''<pad>'''
__UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def snake_case__ ( self : List[Any] ) -> Dict:
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__a ) , 1_0_0_1 )
def snake_case__ ( self : List[Any] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1 )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
__UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def snake_case__ ( self : Tuple ) -> Optional[int]:
# fmt: off
__UpperCAmelCase = {'''input_ids''': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class A ( unittest.TestCase ):
a_ = '''valhalla/s2t_mustc_multilinguial_medium'''
a_ = '''C\'est trop cool'''
a_ = '''Esto es genial'''
@classmethod
def snake_case__ ( cls : Any ) -> List[Any]:
__UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def snake_case__ ( self : List[Any] ) -> List[str]:
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 1_1 )
def snake_case__ ( self : Tuple ) -> Tuple:
self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0 )
def snake_case__ ( self : int ) -> Dict:
self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
__UpperCAmelCase = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2]
__UpperCAmelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
__UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase = '''fr'''
__UpperCAmelCase = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __a )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def snake_case__ ( self : Any ) -> Tuple:
__UpperCAmelCase = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__UpperCAmelCase = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 711 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
__UpperCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i]
__UpperCAmelCase = []
__UpperCAmelCase = 0
__UpperCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__UpperCAmelCase = []
__UpperCAmelCase = -1
for i in range(UpperCamelCase__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__UpperCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__UpperCAmelCase = 0
__UpperCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
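# Worked trace for the first test case below (all arrivals 0, bursts [2, 5, 3, 7]):
# the scheduler runs P1 over 0-2, P3 over 2-5, P2 over 5-10 and P4 over 10-17,
# giving waiting times [0, 5, 2, 10] (mean 4.25) and turnaround times
# [2, 10, 5, 17] (mean 8.5).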
if __name__ == "__main__":
print("[TEST CASE 01]")
__lowerCAmelCase : List[Any] = 4
__lowerCAmelCase : List[Any] = [2, 5, 3, 7]
__lowerCAmelCase : Tuple = [0, 0, 0, 0]
__lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase : Dict = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
a_ = ['''audio_values''', '''audio_mask''']
def __init__( self : int , __a : Dict=2_0_4_8 , __a : List[str]=1 , __a : str=[1_6, 1_6] , __a : str=1_2_8 , __a : Any=4_4_1_0_0 , __a : Dict=8_6 , __a : int=2_0_4_8 , __a : List[Any]=0.0 , **__a : List[Any] , ) -> List[Any]:
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , **__a , )
__UpperCAmelCase = spectrogram_length
__UpperCAmelCase = num_channels
__UpperCAmelCase = patch_size
__UpperCAmelCase = feature_size // self.patch_size[1]
__UpperCAmelCase = n_fft
__UpperCAmelCase = sampling_rate // hop_length_to_sampling_rate
__UpperCAmelCase = sampling_rate
__UpperCAmelCase = padding_value
__UpperCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__a , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__a , norm='''slaney''' , mel_scale='''slaney''' , ).T
def snake_case__ ( self : Optional[Any] , __a : np.array ) -> np.ndarray:
__UpperCAmelCase = spectrogram(
__a , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=8_0.0 , )
__UpperCAmelCase = log_spec[:, :-1]
__UpperCAmelCase = log_spec - 2_0.0
__UpperCAmelCase = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
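# The normalization above maps the dB-scaled spectrogram into [-1.0, 1.0]:
# 20 dB maps to 1.0 and anything at or below -60 dB saturates at -1.0, e.g.
#     np.clip((np.array([-80.0, -60.0, -20.0, 0.0, 20.0]) - 20.0) / 40.0, -2.0, 0.0) + 1.0
# evaluates to [-1.0, -1.0, 0.0, 0.5, 1.0].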
def __call__( self : Union[str, Any] , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = True , __a : Optional[int] = None , __a : bool = False , __a : bool = False , **__a : Dict , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__UpperCAmelCase = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__UpperCAmelCase = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
__UpperCAmelCase = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__UpperCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __a ):
__UpperCAmelCase = [np.asarray(__a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__UpperCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__UpperCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__UpperCAmelCase = np.array(__a ).astype(np.floataa )
# convert into correct format for padding
__UpperCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__UpperCAmelCase = np.ones([len(__a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__UpperCAmelCase = padded_audio_features * self.padding_value
for i in range(len(__a ) ):
__UpperCAmelCase = audio_features[i]
__UpperCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
__UpperCAmelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
__UpperCAmelCase = {'''audio_values''': padded_audio_features}
__UpperCAmelCase = BatchFeature(data=__a , tensor_type=__a )
return encoded_inputs
| 712 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
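# Every placeholder above follows the same pattern: fail loudly on first use when
# the backend is missing. A minimal sketch of that mechanism (names and the error
# message are illustrative, not the transformers internals):
def _requires_backends_example(obj, backends):
    import importlib.util

    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        owner = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{owner} requires the {missing} backend(s), which could not be found.")


class _DummyTorchModelExample:
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        _requires_backends_example(self, self._backends)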
| 654 | 0 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int = 1_0_0 ):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
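# The same difference is available in O(1) from the closed forms
# sum(i) = n(n + 1) / 2 and sum(i**2) = n(n + 1)(2n + 1) / 6; this sketch mirrors
# the loop above (for n = 10 both return 3025 - 385 = 2640).
def solution_closed_form(n: int = 1_0_0) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares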
if __name__ == "__main__":
print(F"""{solution() = }""")
| 713 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 0 |
__lowerCAmelCase : Any = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__lowerCAmelCase : Optional[int] = frozenset(["prompt", "negative_prompt"])
__lowerCAmelCase : Tuple = frozenset([])
__lowerCAmelCase : str = frozenset(["image"])
__lowerCAmelCase : int = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__lowerCAmelCase : Optional[Any] = frozenset(["image"])
__lowerCAmelCase : int = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__lowerCAmelCase : str = frozenset(["prompt", "image", "negative_prompt"])
__lowerCAmelCase : Union[str, Any] = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__lowerCAmelCase : int = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__lowerCAmelCase : Any = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__lowerCAmelCase : Union[str, Any] = frozenset(["image", "mask_image"])
__lowerCAmelCase : str = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__lowerCAmelCase : int = frozenset(["example_image", "image", "mask_image"])
__lowerCAmelCase : int = frozenset(["class_labels"])
__lowerCAmelCase : List[str] = frozenset(["class_labels"])
__lowerCAmelCase : Optional[Any] = frozenset(["batch_size"])
__lowerCAmelCase : Union[str, Any] = frozenset([])
__lowerCAmelCase : Union[str, Any] = frozenset(["batch_size"])
__lowerCAmelCase : str = frozenset([])
__lowerCAmelCase : Optional[Any] = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__lowerCAmelCase : List[str] = frozenset(["prompt", "negative_prompt"])
__lowerCAmelCase : int = frozenset(["input_tokens"])
__lowerCAmelCase : Union[str, Any] = frozenset(["input_tokens"])
| 714 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
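# Sidebar: a minimal PEP 562 sketch of what _LazyModule automates above — in a
# package's __init__.py, a module-level __getattr__ makes heavy submodules load
# only on first attribute access (names below are illustrative):
#
#     import importlib
#
#     _LAZY = {"LlamaConfig": ".configuration_llama"}  # public name -> submodule
#
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __name__), name)
#         raise AttributeError(name)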
| 654 | 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str ):
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = parquet_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = [parquet_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=("train",) ):
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
if split:
__UpperCAmelCase = {split: parquet_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': parquet_path, '''test''': parquet_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__UpperCAmelCase = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__UpperCAmelCase = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = str(shared_datadir / '''test_image_rgb.jpg''' )
__UpperCAmelCase = {'''image''': [image_path]}
__UpperCAmelCase = Features({'''image''': Image()} )
__UpperCAmelCase = Dataset.from_dict(UpperCamelCase__ , features=UpperCamelCase__ )
__UpperCAmelCase = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__UpperCAmelCase = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__UpperCAmelCase = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
assert get_writer_batch_size(UpperCamelCase__ ) == expected
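# A minimal stand-alone round-trip of the reader/writer API exercised above;
# the file name is illustrative, and write() only promises a positive count on
# success (which is all the tests above check for):
def parquet_roundtrip_example(tmp_path):
    ds = Dataset.from_dict({'''col_1''': ['''a''', '''b'''], '''col_2''': [1, 2], '''col_3''': [0.1, 0.2]} )
    assert ParquetDatasetWriter(ds , tmp_path / '''roundtrip.parquet''' ).write() > 0
    return ParquetDatasetReader(str(tmp_path / '''roundtrip.parquet''' ) , cache_dir=tmp_path / '''cache''' ).read()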
| 715 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
__UpperCAmelCase = {}
if train_file is not None:
__UpperCAmelCase = [train_file]
if eval_file is not None:
__UpperCAmelCase = [eval_file]
if test_file is not None:
__UpperCAmelCase = [test_file]
__UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
__UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase = features_name.pop(UpperCamelCase__ )
__UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
__UpperCAmelCase = tokenizer.model_input_names
__UpperCAmelCase = {}
if len(UpperCamelCase__ ) == 1:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
elif len(UpperCamelCase__ ) == 2:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int32) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int32) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int32) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
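# Illustrative shell invocation of this script (the script's file name, the CSV
# names, and the label column index are placeholders; any CSV files with a
# header row work):
#
#   python run_tf_text_classification.py \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --model_name_or_path distilbert-base-uncased \
#       --output_dir ./output --do_train --do_eval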
| 654 | 0 |
from math import sqrt
def solution(limit: int = 1_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    num_cuboids = 0
    max_cuboid_size = 0
    sum_shortest_sides = 4_2  # placeholder; reassigned by the loop below
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(UpperCamelCase__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
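    # Sanity check from the Project Euler #86 statement: 100 is the least
    # cuboid size for which the count first exceeds 2000 (M=99 gives 1975,
    # M=100 gives 2060), so:
    assert solution(2_0_0_0) == 1_0_0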
| 716 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 654 | 0 |
'''simple docstring'''
def join(separator: str , separated: list[str] ) -> str:
    """simple docstring"""
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
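# Worked examples:
# join("", ["a", "b", "c", "d"])        -> 'abcd'
# join("#", ["a", "b", "c", "d"])       -> 'a#b#c#d'
# join(" ", ["You", "are", "amazing!"]) -> 'You are amazing!'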
if __name__ == "__main__":
from doctest import testmod
testmod()
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowerCAmelCase : Tuple = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig( PretrainedConfig ):
    model_type = '''albert'''
    def __init__( self , vocab_size=3_0_0_0_0 , embedding_size=1_2_8 , hidden_size=4_0_9_6 , num_hidden_layers=1_2 , num_hidden_groups=1 , num_attention_heads=6_4 , intermediate_size=1_6_3_8_4 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig( OnnxConfig ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
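# Usage sketch for the two classes above (instantiation is cheap; the ONNX
# config only describes input axes, it does not build a model):
# config = AlbertConfig()  # defaults mirror ALBERT's factorized embeddings
# onnx_config = AlbertOnnxConfig(config, task="multiple-choice")
# print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes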
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig( PretrainedConfig ):
    model_type = '''bert-generation'''
    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 654 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = OpenAIGPTTokenizer
a_ = OpenAIGPTTokenizerFast
a_ = True
a_ = False
def snake_case__ ( self : Dict ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__UpperCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__a ) )
def snake_case__ ( self : Optional[Any] , __a : Optional[Any] ) -> List[Any]:
return "lower newer", "lower newer"
def snake_case__ ( self : Any ) -> Optional[int]:
__UpperCAmelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
__UpperCAmelCase = '''lower'''
__UpperCAmelCase = ['''low''', '''er</w>''']
__UpperCAmelCase = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__UpperCAmelCase = tokens + ['''<unk>''']
__UpperCAmelCase = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def snake_case__ ( self : Tuple , __a : int=1_5 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__UpperCAmelCase = '''This is a simple input'''
__UpperCAmelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
__UpperCAmelCase = ('''This is a simple input''', '''This is a pair''')
__UpperCAmelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='''max_length''' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='''max_length''' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='''max_length''' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='''max_length''' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='''max_length''' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='''max_length''' , )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A ( UpperCAmelCase ):
pass
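# Toy walk-through of the BPE data built in setUp above: a minimal greedy merge
# routine (real BPE re-ranks candidate pairs at every step, but for this toy
# vocab the result is identical).
def apply_bpe_toy(word, merges):
    symbols = list(word[:-1] ) + [word[-1] + '''</w>''']
    for a, b in merges:  # apply each merge rule everywhere, in priority order
        i = 0
        while i < len(symbols ) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols
# apply_bpe_toy("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")])
# -> ["low", "er</w>"], exactly the expectation asserted in the test above.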
| 719 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float ) -> float:
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def gamma(velocity: float ) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix(velocity: float ) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity: float , event: np.ndarray | None = None ) -> np.ndarray:
    """simple docstring"""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
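    # Worked check: at v = 0.6c, beta = 0.6 and gamma = 1 / sqrt(1 - 0.36) = 1.25,
    # so the boost matrix's top-left 2x2 block is [[1.25, -0.75], [-0.75, 1.25]].
    assert abs(gamma(0.6 * c) - 1.25) < 1e-12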
| 654 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A ( UpperCAmelCase , unittest.TestCase ):
    a_ = ShapEImg2ImgPipeline
a_ = ['''image''']
a_ = ['''image''']
a_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
a_ = False
@property
def snake_case__ ( self : Tuple ) -> List[str]:
return 3_2
@property
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
return 3_2
@property
def snake_case__ ( self : Optional[int] ) -> str:
return self.time_input_dim * 4
@property
def snake_case__ ( self : List[Any] ) -> int:
return 8
@property
def snake_case__ ( self : Optional[int] ) -> str:
torch.manual_seed(0 )
__UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__UpperCAmelCase = CLIPVisionModel(__a )
return model
@property
def snake_case__ ( self : int ) -> Union[str, Any]:
__UpperCAmelCase = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_2_4 , )
return image_processor
@property
def snake_case__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__UpperCAmelCase = PriorTransformer(**__a )
return model
@property
def snake_case__ ( self : Dict ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
__UpperCAmelCase = ShapERenderer(**__a )
return model
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
__UpperCAmelCase = self.dummy_prior
__UpperCAmelCase = self.dummy_image_encoder
__UpperCAmelCase = self.dummy_image_processor
__UpperCAmelCase = self.dummy_renderer
__UpperCAmelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
__UpperCAmelCase = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def snake_case__ ( self : Tuple , __a : Dict , __a : Union[str, Any]=0 ) -> List[str]:
__UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(__a )
else:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase = '''cpu'''
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__a )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = pipe(**self.get_dummy_inputs(__a ) )
__UpperCAmelCase = output.images[0]
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
__UpperCAmelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : str ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
__UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def snake_case__ ( self : Dict ) -> List[str]:
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__a )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
__UpperCAmelCase = batch_size * [inputs[key]]
__UpperCAmelCase = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def snake_case__ ( self : Any ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Dict ) -> Any:
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
        __UpperCAmelCase = ShapEImg2ImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(0 )
__UpperCAmelCase = pipe(
__a , generator=__a , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__a , __a )
| 720 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self ) -> None:
        self.elements = []
        self.set = set()
    def minkey(self ):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('''inf''' )
    def empty(self ):
        return len(self.elements ) == 0
    def put(self , item , priority ):
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element(self , item ):
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def top_show(self ):
        return self.elements[0][1]
    def top(self ):
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
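# Quick demonstration of the queue semantics above: put() on an item already in
# the queue updates its priority in place instead of adding a duplicate entry.
_q = PriorityQueue()
_q.put((0, 0) , 5.0 )
_q.put((0, 0) , 2.0 )  # same item -> reprioritised, not duplicated
assert _q.top() == (2.0, (0, 0))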
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
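# Note: key() above is the standard weighted-A* priority f_i(s) = g(s) + Wa * h_i(s).
# In multi-heuristic A*, the anchor search uses the consistent heuristic (i = 0),
# and the inadmissible searches (i >= 1) are only expanded while their best key
# stays within a weight factor of the anchor's best key (in the original MHA*
# formulation these are two separate weights W1 and W2; this module uses Wa for both).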
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((__UpperCAmelCase) , (__UpperCAmelCase)) = s
__UpperCAmelCase = (x - 1, y)
__UpperCAmelCase = (x + 1, y)
__UpperCAmelCase = (x, y + 1)
__UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method ):
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
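# Usage sketch for the decorator above (the class and method below are
# illustrative; diffusers applies it to model entry points such as encode/decode):
# class MyModel(torch.nn.Module):
#     @apply_forward_hook
#     def encode(self, x):
#         return self.encoder(x)
#
# With accelerate >= 0.17.0 installed, calling model.encode(x) first runs
# model._hf_hook.pre_forward(model) so offloaded weights are moved into place.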
| 721 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
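# Illustration: camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"];
# a leading acronym run such as "TF" stays grouped as a single chunk.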
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to pick the right preprocessing class for each model type.
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
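# Illustration: the frame built above has one row per model type, with boolean
# pytorch / tensorflow / flax backend columns plus the preferred preprocessing
# class, e.g. a row like ("bert", True, True, True, "AutoTokenizer").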
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
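# Illustrative invocations (the token and sha values are placeholders):
#   python utils/update_metadata.py --token hf_xxx --commit_sha 0123abcd
#   python utils/update_metadata.py --check-only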
| 654 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : int = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
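# Note: with `_LazyModule`, a statement such as
# `from transformers.models.roformer import RoFormerModel` resolves the
# torch/TF/Flax submodules above only on first attribute access, so importing
# the package stays cheap when a backend is missing.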
| 700 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 | 0 |
import requests
__lowerCAmelCase : Dict = "YOUR API KEY"
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str = giphy_api_key ):
"""simple docstring"""
__UpperCAmelCase = '''+'''.join(query.split() )
__UpperCAmelCase = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
__UpperCAmelCase = requests.get(UpperCamelCase__ ).json()['''data''']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 701 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
__UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__UpperCAmelCase = in_file.readlines()
__UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase = []
for obj_list in obj_lists:
__UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for idx in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = []
__UpperCAmelCase = img_list[idx]
path_list.append(UpperCamelCase__ )
__UpperCAmelCase = anno_list[idx]
__UpperCAmelCase = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 0 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : list[int] ):
"""simple docstring"""
__UpperCAmelCase = []
if len(UpperCamelCase__ ) == 1:
return [nums.copy()]
for _ in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = nums.pop(0 )
__UpperCAmelCase = permute(UpperCamelCase__ )
for perm in permutations:
perm.append(UpperCamelCase__ )
result.extend(UpperCamelCase__ )
nums.append(UpperCamelCase__ )
return result
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
def backtrack(UpperCamelCase__ : str ):
if start == len(UpperCamelCase__ ) - 1:
output.append(nums[:] )
else:
for i in range(UpperCamelCase__ , len(UpperCamelCase__ ) ):
__UpperCAmelCase , __UpperCAmelCase = nums[i], nums[start]
backtrack(start + 1 )
__UpperCAmelCase , __UpperCAmelCase = nums[i], nums[start] # backtrack
__UpperCAmelCase = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
__lowerCAmelCase : Dict = permutea([1, 2, 3])
print(res)
doctest.testmod()
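# Worked example (intended upstream behavior): on [1, 2] the recursive version
# returns [[2, 1], [1, 2]] and the backtracking version returns
# [[1, 2], [2, 1]]; both enumerate all n! orderings, only the order differs.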
| 702 |
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = Path(UpperCamelCase__ )
__UpperCAmelCase = Path(UpperCamelCase__ )
dest_dir.mkdir(exist_ok=UpperCamelCase__ )
for path in src_dir.iterdir():
__UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase = dest_dir.joinpath(path.name )
print(UpperCamelCase__ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )
if __name__ == "__main__":
fire.Fire(minify)
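# Illustrative invocation (assuming this file is saved as minify.py; the
# directory names are placeholders):
#   python minify.py full_dataset/ mini_dataset/ 100
# which writes the first 100 lines of every file in full_dataset/ into
# mini_dataset/.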
| 654 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 703 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
__UpperCAmelCase = 1
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
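# Worked values: the recurrence above produces the Catalan numbers
# 1, 1, 2, 5, 14, 42 for inputs 1..6, i.e. f(n) = C(2n-2, n-1) / n.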
| 654 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A ( UpperCAmelCase ):
a_ = '''beit'''
def __init__( self : Any , __a : int=8_1_9_2 , __a : Optional[Any]=7_6_8 , __a : Optional[Any]=1_2 , __a : Any=1_2 , __a : List[str]=3_0_7_2 , __a : str="gelu" , __a : Union[str, Any]=0.0 , __a : Tuple=0.0 , __a : Optional[int]=0.0_2 , __a : Union[str, Any]=1e-12 , __a : str=2_2_4 , __a : Tuple=1_6 , __a : Union[str, Any]=3 , __a : Union[str, Any]=False , __a : List[str]=False , __a : int=False , __a : Union[str, Any]=False , __a : Any=0.1 , __a : Any=0.1 , __a : List[Any]=True , __a : Optional[int]=[3, 5, 7, 1_1] , __a : Any=[1, 2, 3, 6] , __a : str=True , __a : Optional[int]=0.4 , __a : List[Any]=2_5_6 , __a : List[Any]=1 , __a : str=False , __a : Any=2_5_5 , **__a : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = use_mask_token
__UpperCAmelCase = use_absolute_position_embeddings
__UpperCAmelCase = use_relative_position_bias
__UpperCAmelCase = use_shared_relative_position_bias
__UpperCAmelCase = layer_scale_init_value
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
__UpperCAmelCase = out_indices
__UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
__UpperCAmelCase = use_auxiliary_head
__UpperCAmelCase = auxiliary_loss_weight
__UpperCAmelCase = auxiliary_channels
__UpperCAmelCase = auxiliary_num_convs
__UpperCAmelCase = auxiliary_concat_input
__UpperCAmelCase = semantic_loss_ignore_index
class A ( UpperCAmelCase ):
a_ = version.parse('''1.11''' )
@property
def snake_case__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case__ ( self : str ) -> float:
return 1e-4
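# Note: the ONNX export config above declares a single `pixel_values` input
# laid out as (batch, num_channels, height, width) and validates exported
# outputs with an absolute tolerance of 1e-4, per the two properties defined
# above.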
| 704 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
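# Note: as the tests above exercise, the decorator retries the wrapped
# function with the batch size halved after each simulated CUDA OOM
# (128 -> 64 -> 32 -> 16 -> 8) and raises once the size reaches zero.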
| 654 | 0 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError('''String lengths must match!''' )
__UpperCAmelCase = 0
    for chara, charb in zip(UpperCamelCase__ , UpperCamelCase__ ):
        if chara != charb:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
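# Worked example: "karolin" and "kathrin" differ at three positions
# (r/t, o/h, l/r), so their Hamming distance is 3.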
| 705 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
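# Background: this is Newton's forward-difference interpolation,
#   f(x) ≈ y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + ...,  with u = (x - x0) / h
# for equally spaced x values (h = x1 - x0); `ucal` above accumulates the
# product u(u-1)...(u-i+1) used by the i-th term.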
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''enter the numbers of values: ''' ) )
__UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
__UpperCAmelCase = 0
print('''enter the values of parameters in a list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 654 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
__UpperCAmelCase = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCAmelCase = CLIPTextModel(__a )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self : Union[str, Any] , __a : Optional[int] , __a : Optional[Any]=0 ) -> int:
__UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__a ) ).to(__a )
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase = Image.fromarray(np.uinta(__a ) ).convert('''RGB''' )
if str(__a ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(__a )
else:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : List[str] ) -> Optional[Any]:
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = sd_pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCAmelCase = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case__ ( self : List[str] ) -> Optional[Any]:
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = '''french fries'''
__UpperCAmelCase = sd_pipe(**__a , negative_prompt=__a )
__UpperCAmelCase = output.images
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCAmelCase = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = [inputs['''prompt''']] * 2
__UpperCAmelCase = np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0
__UpperCAmelCase = torch.from_numpy(__a ).unsqueeze(0 ).to(__a )
__UpperCAmelCase = image / 2 + 0.5
__UpperCAmelCase = image.permute(0 , 3 , 1 , 2 )
__UpperCAmelCase = image.repeat(2 , 1 , 1 , 1 )
__UpperCAmelCase = sd_pipe(**__a ).images
__UpperCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__UpperCAmelCase = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' )
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = sd_pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1]
__UpperCAmelCase = [round(__a , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(__a ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCAmelCase = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case__ ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self : List[Any] ) -> Tuple:
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = VaeImageProcessor(do_resize=__a , do_normalize=__a )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = pipe(**self.get_dummy_inputs_by_type(__a , input_image_type='''pt''' ) )[0]
__UpperCAmelCase = components['''vae''']
__UpperCAmelCase = self.get_dummy_inputs_by_type(__a , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__UpperCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__UpperCAmelCase = pipe(**__a )[0]
__UpperCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(__a , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def snake_case__ ( self : Dict ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : List[str] , __a : Dict=0 ) -> List[str]:
__UpperCAmelCase = torch.manual_seed(__a )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__UpperCAmelCase = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = self.get_inputs()
__UpperCAmelCase = pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case__ ( self : Any ) -> List[str]:
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a )
__UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = self.get_inputs()
__UpperCAmelCase = pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case__ ( self : Tuple ) -> Tuple:
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a )
__UpperCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = self.get_inputs()
__UpperCAmelCase = pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase = 0
def callback_fn(__a : int , __a : int , __a : torch.FloatTensor ) -> None:
__UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__UpperCAmelCase = latents[0, -3:, -3:, -1]
__UpperCAmelCase = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__UpperCAmelCase = latents[0, -3:, -3:, -1]
__UpperCAmelCase = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__UpperCAmelCase = False
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = self.get_inputs()
pipe(**__a , callback=__a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case__ ( self : Any ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = self.get_inputs()
__UpperCAmelCase = pipe(**__a )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__UpperCAmelCase = inputs['''image'''].resize((5_0_4, 5_0_4) )
__UpperCAmelCase = '''timbrooks/instruct-pix2pix'''
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = pipe(**__a )
__UpperCAmelCase = output.images[0]
__UpperCAmelCase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__UpperCAmelCase = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
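# Note: InstructPix2Pix applies two guidance terms, `guidance_scale` for the
# text instruction and `image_guidance_scale` for the input image, which is
# why both knobs appear in the inputs dictionaries above.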
| 706 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 654 | 0 |
'''simple docstring'''
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A ( UpperCAmelCase ):
a_ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
a_ = '''CIDAS/clipseg-rd64-refined'''
a_ = '''image_segmenter'''
a_ = CLIPSegForImageSegmentation
a_ = ['''image''', '''text''']
a_ = ['''image''']
def __init__( self : Tuple , *__a : str , **__a : List[Any] ) -> Tuple:
requires_backends(self , ['''vision'''] )
super().__init__(*__a , **__a )
def snake_case__ ( self : Any , __a : "Image" , __a : str ) -> int:
return self.pre_processor(text=[label] , images=[image] , padding=__a , return_tensors='''pt''' )
def snake_case__ ( self : Any , __a : Dict ) -> List[Any]:
with torch.no_grad():
__UpperCAmelCase = self.model(**__a ).logits
return logits
def snake_case__ ( self : List[Any] , __a : Optional[int] ) -> List[Any]:
__UpperCAmelCase = outputs.cpu().detach().numpy()
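        # binarize the logits into a 0/1 mask before scaling to the 8-bit range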
__UpperCAmelCase = 0
__UpperCAmelCase = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
| 707 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = '''bf16''' if fpaa else None
__UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='''unet''' , dtype=__a , revision=__a )
return model, params
def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
| 654 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str=0.9_99 , UpperCamelCase__ : Dict="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase__ : Union[str, Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase__ : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
__UpperCAmelCase = []
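    # discretize: beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta)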
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = i / num_diffusion_timesteps
__UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) )
return torch.tensor(UpperCamelCase__ , dtype=torch.floataa )
class A ( UpperCAmelCase , UpperCAmelCase ):
a_ = [e.name for e in KarrasDiffusionSchedulers]
a_ = 2
@register_to_config
def __init__( self : Tuple , __a : int = 1_0_0_0 , __a : float = 0.0_0_0_8_5 , __a : float = 0.0_1_2 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ) -> str:
if trained_betas is not None:
__UpperCAmelCase = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
__UpperCAmelCase = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__UpperCAmelCase = betas_for_alpha_bar(__a , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
__UpperCAmelCase = betas_for_alpha_bar(__a , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
__UpperCAmelCase = 1.0 - self.betas
__UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
__UpperCAmelCase = use_karras_sigmas
def snake_case__ ( self : Optional[int] , __a : int , __a : List[Any]=None ) -> Any:
if schedule_timesteps is None:
__UpperCAmelCase = self.timesteps
__UpperCAmelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__UpperCAmelCase = 1 if len(__a ) > 1 else 0
else:
__UpperCAmelCase = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
__UpperCAmelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def snake_case__ ( self : List[str] ) -> Tuple:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def snake_case__ ( self : int , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
__UpperCAmelCase = self.index_for_timestep(__a )
__UpperCAmelCase = self.sigmas[step_index]
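        # divide by sqrt(sigma**2 + 1) so the model receives a roughly unit-variance input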
__UpperCAmelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def snake_case__ ( self : str , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ) -> Union[str, Any]:
__UpperCAmelCase = num_inference_steps
__UpperCAmelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__UpperCAmelCase = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__UpperCAmelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__UpperCAmelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__UpperCAmelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__UpperCAmelCase = np.log(__a )
__UpperCAmelCase = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
__UpperCAmelCase = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
__UpperCAmelCase = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
__UpperCAmelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__UpperCAmelCase = torch.from_numpy(__a ).to(device=__a )
__UpperCAmelCase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__UpperCAmelCase = torch.from_numpy(__a )
__UpperCAmelCase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith('''mps''' ):
# mps does not support float64
__UpperCAmelCase = timesteps.to(__a , dtype=torch.floataa )
else:
__UpperCAmelCase = timesteps.to(device=__a )
# empty dt and derivative
__UpperCAmelCase = None
__UpperCAmelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__UpperCAmelCase = defaultdict(__a )
def snake_case__ ( self : Tuple , __a : Optional[int] , __a : Optional[Any] ) -> List[str]:
# get log sigma
__UpperCAmelCase = np.log(__a )
# get distribution
__UpperCAmelCase = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__UpperCAmelCase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__UpperCAmelCase = low_idx + 1
__UpperCAmelCase = log_sigmas[low_idx]
__UpperCAmelCase = log_sigmas[high_idx]
# interpolate sigmas
__UpperCAmelCase = (low - log_sigma) / (low - high)
__UpperCAmelCase = np.clip(__a , 0 , 1 )
# transform interpolation to time range
__UpperCAmelCase = (1 - w) * low_idx + w * high_idx
__UpperCAmelCase = t.reshape(sigma.shape )
return t
def snake_case__ ( self : List[str] , __a : torch.FloatTensor , __a : int ) -> torch.FloatTensor:
__UpperCAmelCase = in_sigmas[-1].item()
__UpperCAmelCase = in_sigmas[0].item()
__UpperCAmelCase = 7.0 # 7.0 is the value used in the paper
__UpperCAmelCase = np.linspace(0 , 1 , __a )
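        # interpolate linearly in sigma**(1 / rho) space, following Karras et al. (2022)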
__UpperCAmelCase = sigma_min ** (1 / rho)
__UpperCAmelCase = sigma_max ** (1 / rho)
__UpperCAmelCase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def snake_case__ ( self : List[Any] ) -> List[Any]:
return self.dt is None
def snake_case__ ( self : str , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ) -> Union[SchedulerOutput, Tuple]:
__UpperCAmelCase = self.index_for_timestep(__a )
# advance index counter by 1
__UpperCAmelCase = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__UpperCAmelCase = self.sigmas[step_index]
__UpperCAmelCase = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__UpperCAmelCase = self.sigmas[step_index - 1]
__UpperCAmelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__UpperCAmelCase = 0
__UpperCAmelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__UpperCAmelCase = sigma_hat if self.state_in_first_order else sigma_next
__UpperCAmelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase = sigma_hat if self.state_in_first_order else sigma_next
__UpperCAmelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__UpperCAmelCase = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
__UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__UpperCAmelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__UpperCAmelCase = sigma_next - sigma_hat
# store for 2nd order step
__UpperCAmelCase = derivative
__UpperCAmelCase = dt
__UpperCAmelCase = sample
else:
# 2. 2nd order / Heun's method
__UpperCAmelCase = (sample - pred_original_sample) / sigma_next
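            # average the derivatives at both ends of the interval (Heun's trapezoidal correction)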
__UpperCAmelCase = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__UpperCAmelCase = self.dt
__UpperCAmelCase = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def snake_case__ ( self : Optional[int] , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__UpperCAmelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
__UpperCAmelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__UpperCAmelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__UpperCAmelCase = self.timesteps.to(original_samples.device )
__UpperCAmelCase = timesteps.to(original_samples.device )
__UpperCAmelCase = [self.index_for_timestep(__a , __a ) for t in timesteps]
__UpperCAmelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__UpperCAmelCase = sigma.unsqueeze(-1 )
__UpperCAmelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self : Union[str, Any] ) -> Tuple:
return self.config.num_train_timesteps
| 708 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
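    # a dev release is cut down to its base version; --patch bumps micro, otherwise bump minor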
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 654 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase ( UpperCamelCase__ : list[list[int]] ):
"""simple docstring"""
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(UpperCamelCase__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(UpperCamelCase__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase = len(UpperCamelCase__ )
__UpperCAmelCase = max(UpperCamelCase__ )
__UpperCAmelCase = min(UpperCamelCase__ )
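    # counting sort runs in O(n + k) time, where k = coll_max - coll_min + 1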
# create the counting array
__UpperCAmelCase = coll_max + 1 - coll_min
__UpperCAmelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors; after this, counting_arr[i]
    # tells us how many elements <= i exist in the collection
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort), iterating from end to beginning and updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
__UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 0 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo.
__lowerCAmelCase : Tuple = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCAmelCase : Optional[Any] = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = SavedModel()
__UpperCAmelCase = []
with open(os.path.join(UpperCamelCase__ , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
__UpperCAmelCase = json.load(UpperCamelCase__ )['''opsets''']
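    # opset support is cumulative, so gather every op available up to the target opset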
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(UpperCamelCase__ )] )
with open(UpperCamelCase__ , '''rb''' ) as f:
saved_model.ParseFromString(f.read() )
__UpperCAmelCase = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
__UpperCAmelCase = sorted(UpperCamelCase__ )
__UpperCAmelCase = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(UpperCamelCase__ )
if strict and len(UpperCamelCase__ ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(UpperCamelCase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*UpperCamelCase__ , sep='''\n''' )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
__lowerCAmelCase : str = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 710 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ):
"""simple docstring"""
__UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' )
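    # Yahoo's markup changes often, so this hard-coded CSS class is brittle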
__UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
__UpperCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i]
__UpperCAmelCase = []
__UpperCAmelCase = 0
__UpperCAmelCase = 0
    # While processes remain incomplete, every process whose arrival time
    # has passed and which still has remaining execution time is placed in
    # ready_process; the shortest job in ready_process (target_process)
    # is executed next.
while completed != no_of_processes:
__UpperCAmelCase = []
__UpperCAmelCase = -1
for i in range(UpperCamelCase__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__UpperCAmelCase = i
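            # run the chosen job to completion (non-preemptive shortest-job-first)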
total_time += burst_time[target_process]
completed += 1
__UpperCAmelCase = 0
__UpperCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
__lowerCAmelCase : List[Any] = 4
__lowerCAmelCase : List[Any] = [2, 5, 3, 7]
__lowerCAmelCase : Tuple = [0, 0, 0, 0]
__lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase : Dict = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCAmelCase : str = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , ):
"""simple docstring"""
if attention_mask is None:
__UpperCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__UpperCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__UpperCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class A :
def __init__( self : Dict , __a : Any , __a : List[str]=1_3 , __a : List[str]=7 , __a : List[str]=True , __a : List[Any]=False , __a : List[Any]=9_9 , __a : Any=1_6 , __a : Any=2 , __a : Dict=4 , __a : Optional[int]=4 , __a : List[str]="gelu" , __a : Optional[Any]=0.1 , __a : List[str]=0.1 , __a : Optional[int]=3_2 , __a : Union[str, Any]=2 , __a : Dict=1 , __a : Union[str, Any]=0 , __a : int=0.0_2 , ) -> Union[str, Any]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = pad_token_id
__UpperCAmelCase = bos_token_id
__UpperCAmelCase = initializer_range
def snake_case__ ( self : Dict ) -> Optional[int]:
__UpperCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__UpperCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__UpperCAmelCase = shift_tokens_right(__a , 1 , 2 )
__UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__a , )
__UpperCAmelCase = prepare_blenderbot_inputs_dict(__a , __a , __a )
return config, inputs_dict
def snake_case__ ( self : Tuple ) -> str:
__UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self : str , __a : Optional[Any] , __a : Dict , __a : Union[str, Any] ) -> Tuple:
__UpperCAmelCase = 2_0
__UpperCAmelCase = model_class_name(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
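        # decode one step at a time with a cache, then compare against a full-length pass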
__UpperCAmelCase , __UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a )
__UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
__UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , )
__UpperCAmelCase = model.decode(__a , __a )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def snake_case__ ( self : Optional[Any] , __a : str , __a : List[str] , __a : str ) -> Union[str, Any]:
__UpperCAmelCase = 2_0
__UpperCAmelCase = model_class_name(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase , __UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
__UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , )
__UpperCAmelCase = model.decode(__a , __a , decoder_attention_mask=__a )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class A ( unittest.TestCase ):
a_ = 9_9
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
__UpperCAmelCase = input_ids.shape[0]
__UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case__ ( self : str ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_config_and_data()
__UpperCAmelCase = FlaxBlenderbotForConditionalGeneration(__a )
__UpperCAmelCase = lm_model(input_ids=__a )
__UpperCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __a )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
__UpperCAmelCase = FlaxBlenderbotForConditionalGeneration(__a )
__UpperCAmelCase = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
__UpperCAmelCase = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
__UpperCAmelCase = lm_model(input_ids=__a , decoder_input_ids=__a )
__UpperCAmelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __a )
def snake_case__ ( self : int ) -> List[Any]:
__UpperCAmelCase = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
__UpperCAmelCase = shift_tokens_right(__a , 1 , 2 )
__UpperCAmelCase = np.equal(__a , 1 ).astype(np.floataa ).sum()
__UpperCAmelCase = np.equal(__a , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__a , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class A ( UpperCAmelCase , unittest.TestCase , UpperCAmelCase ):
a_ = True
a_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
a_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def snake_case__ ( self : Optional[Any] ) -> int:
__UpperCAmelCase = FlaxBlenderbotModelTester(self )
def snake_case__ ( self : Union[str, Any] ) -> str:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a , __a , __a )
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a )
def snake_case__ ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = self._prepare_for_class(__a , __a )
__UpperCAmelCase = model_class(__a )
@jax.jit
def encode_jitted(__a : List[Any] , __a : Any=None , **__a : Optional[Any] ):
return model.encode(input_ids=__a , attention_mask=__a )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = encode_jitted(**__a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = encode_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = model_class(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__UpperCAmelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__a : Tuple , __a : Tuple , __a : Optional[Any] ):
return model.decode(
decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = decode_jitted(**__a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = decode_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : List[Any] ) -> Optional[int]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # Blenderbot models expect an eos token in input_ids
__UpperCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id
__UpperCAmelCase = model(__a )
self.assertIsNotNone(__a )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def snake_case__ ( self : Dict ) -> str:
__UpperCAmelCase = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
__UpperCAmelCase = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
__UpperCAmelCase = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=__a )
__UpperCAmelCase = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
__UpperCAmelCase = ['''Sam''']
__UpperCAmelCase = tokenizer(__a , return_tensors='''jax''' )
__UpperCAmelCase = model.generate(**__a , **__a )
__UpperCAmelCase = '''Sam is a great name. It means "sun" in Gaelic.'''
__UpperCAmelCase = tokenizer.batch_decode(__a , **__a )
assert generated_txt[0].strip() == tgt_text
| 712 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
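# Placeholder classes that raise an informative error when torch is not installed.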
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
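# The repetitive classes above all follow the "dummy object" pattern for
# optional backends: each placeholder records its required backend and calls
# requires_backends, which raises an ImportError when torch is missing.
# A minimal sketch of the pattern (the class name SomeTorchModel and the
# DummyObject metaclass name are assumptions, not identifiers from this file):
#
#     class SomeTorchModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])  # fail fast without torch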
| 654 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
a_ = ['''pixel_values''']
def __init__( self : int , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PIL.Image.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : Union[int, float] = 1 / 2_5_5 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : Optional[Any] , ) -> None:
super().__init__(**__a )
__UpperCAmelCase = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
__UpperCAmelCase = get_size_dict(__a )
__UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__UpperCAmelCase = get_size_dict(__a , param_name='''crop_size''' )
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = do_center_crop
__UpperCAmelCase = crop_size
__UpperCAmelCase = do_rescale
__UpperCAmelCase = rescale_factor
__UpperCAmelCase = do_normalize
__UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PIL.Image.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
__a , size=(size['''height'''], size['''width''']) , resample=__a , data_format=__a , **__a )
def snake_case__ ( self : str , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a )
def snake_case__ ( self : Union[str, Any] , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> Tuple:
return rescale(__a , scale=__a , data_format=__a , **__a )
def snake_case__ ( self : Any , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def snake_case__ ( self : Tuple , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : Optional[int]=None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[str] , ) -> PIL.Image.Image:
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase = image_std if image_std is not None else self.image_std
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(__a )
__UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase = get_size_dict(__a , param_name='''crop_size''' )
__UpperCAmelCase = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(__a ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
__UpperCAmelCase = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
__UpperCAmelCase = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
__UpperCAmelCase = [to_channel_dimension_format(__a , __a ) for image in images]
__UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a )
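# Illustrative usage of the processor above, assuming the upstream API where
# the final method is named preprocess (the checkpoint-free construction and
# the PIL input are assumptions, not code from this file):
#
#     from PIL import Image
#     processor = A()  # defaults: resize to 256x256, center-crop to 224x224
#     batch = processor.preprocess(Image.open("cat.png"), return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224)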
| 713 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
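# The replacement the warning points to is a one-line import change:
#
#     from diffusers import StableDiffusionImg2ImgPipeline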
| 654 | 0 |
import random
def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = a[left_index]
__UpperCAmelCase = left_index + 1
for j in range(left_index + 1 , UpperCamelCase__ ):
if a[j] < pivot:
__UpperCAmelCase , __UpperCAmelCase = a[i], a[j]
i += 1
__UpperCAmelCase , __UpperCAmelCase = a[i - 1], a[left_index]
return i - 1
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Dict ):
"""simple docstring"""
if left < right:
__UpperCAmelCase = random.randint(UpperCamelCase__ , right - 1 )
__UpperCAmelCase , __UpperCAmelCase = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
__UpperCAmelCase = partition(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
quick_sort_random(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
UpperCamelCase__ , pivot_index + 1 , UpperCamelCase__ ) # recursive quicksort to the right of the pivot point
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip()
__UpperCAmelCase = [int(UpperCamelCase__ ) for item in user_input.split(''',''' )]
quick_sort_random(UpperCamelCase__ , 0 , len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
if __name__ == "__main__":
main()
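# Illustrative self-check, using the quick_sort_random call convention from
# main() above (right bound exclusive); the sample data is an assumption:
#
#     data = [3, 1, 4, 1, 5, 9, 2, 6]
#     quick_sort_random(data, 0, len(data))
#     assert data == [1, 1, 2, 3, 4, 5, 6, 9]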
| 714 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
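# What _LazyModule provides: importing this package only records the
# _import_structure above, and the heavy torch-backed submodules load on
# first attribute access. Illustrative only:
#
#     from transformers.models.llama import LlamaConfig  # cheap, resolves lazily
#     config = LlamaConfig()  # the real configuration module imports here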
| 654 | 0 |
'''simple docstring'''
import cva
import numpy as np
class A :
def __init__( self : Union[str, Any] , __a : float , __a : int ) -> List[str]:
if k in (0.0_4, 0.0_6):
__UpperCAmelCase = k
__UpperCAmelCase = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self : List[str] ) -> str:
return str(self.k )
def snake_case__ ( self : List[str] , __a : str ) -> tuple[cva.Mat, list[list[int]]]:
__UpperCAmelCase = cva.imread(__a , 0 )
__UpperCAmelCase , __UpperCAmelCase = img.shape
__UpperCAmelCase = []
__UpperCAmelCase = img.copy()
__UpperCAmelCase = cva.cvtColor(__a , cva.COLOR_GRAY2RGB )
__UpperCAmelCase , __UpperCAmelCase = np.gradient(__a )
__UpperCAmelCase = dx**2
__UpperCAmelCase = dy**2
__UpperCAmelCase = dx * dy
__UpperCAmelCase = 0.0_4
__UpperCAmelCase = self.window_size // 2
for y in range(__a , h - offset ):
for x in range(__a , w - offset ):
__UpperCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__UpperCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__UpperCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__UpperCAmelCase = (wxx * wyy) - (wxy**2)
__UpperCAmelCase = wxx + wyy
__UpperCAmelCase = det - k * (trace**2)
                # Threshold on the corner response; the 0.5 cutoff can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
__lowerCAmelCase : List[str] = HarrisCorner(0.0_4, 3)
__lowerCAmelCase : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
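# The response computed in detect() is the standard Harris measure
#     R = det(M) - k * trace(M)**2,  with  M = [[w_xx, w_xy], [w_xy, w_yy]],
# where each w_* is the window sum of the gradient products (ixx, iyy, ixy
# above); pixels whose response exceeds the 0.5 threshold are highlighted.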
| 715 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
__UpperCAmelCase = {}
if train_file is not None:
__UpperCAmelCase = [train_file]
if eval_file is not None:
__UpperCAmelCase = [eval_file]
if test_file is not None:
__UpperCAmelCase = [test_file]
__UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
__UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase = features_name.pop(UpperCamelCase__ )
__UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
__UpperCAmelCase = tokenizer.model_input_names
__UpperCAmelCase = {}
if len(UpperCamelCase__ ) == 1:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
elif len(UpperCamelCase__ ) == 2:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
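# An example invocation (the script name and CSV file names are assumptions;
# the flags correspond to the dataclass fields above):
#
#     python run_tf_text_classification.py \
#         --model_name_or_path distilbert-base-uncased \
#         --train_file train.csv --dev_file dev.csv --test_file test.csv \
#         --label_column_id 0 --max_seq_length 128 \
#         --output_dir ./tf_clf --do_train --do_eval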
| 654 | 0 |
from math import factorial
def lowerCAmelCase ( UpperCamelCase__ : int = 1_0_0 ):
"""simple docstring"""
return sum(int(UpperCamelCase__ ) for x in str(factorial(UpperCamelCase__ ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
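# Worked example: factorial(10) = 3628800 and 3+6+2+8+8+0+0 = 27, so
# solution(10) == 27. With the default n = 100 this is Project Euler problem 20.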
| 716 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 654 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__lowerCAmelCase : List[str] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCAmelCase : str = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__lowerCAmelCase : int = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def snake_case__ ( self : int ) -> int:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : int = CHRF.CHAR_ORDER , __a : int = CHRF.WORD_ORDER , __a : int = CHRF.BETA , __a : bool = False , __a : bool = False , __a : bool = False , ) -> Tuple:
__UpperCAmelCase = len(references[0] )
if any(len(__a ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__UpperCAmelCase = [[refs[i] for refs in references] for i in range(__a )]
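        # The comprehension above transposes per-prediction reference lists into
        # per-position streams, the layout sacrebleu expects, e.g.
        #     [["r1a", "r1b"], ["r2a", "r2b"]] -> [["r1a", "r2a"], ["r1b", "r2b"]]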
__UpperCAmelCase = CHRF(__a , __a , __a , __a , __a , __a )
__UpperCAmelCase = sb_chrf.corpus_score(__a , __a )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class A :
a_ = PegasusConfig
a_ = {}
a_ = '''gelu'''
def __init__( self : Tuple , __a : int , __a : List[str]=1_3 , __a : Dict=7 , __a : Dict=True , __a : Dict=False , __a : Dict=9_9 , __a : Tuple=3_2 , __a : Optional[Any]=2 , __a : Optional[int]=4 , __a : str=3_7 , __a : str=0.1 , __a : Optional[Any]=0.1 , __a : List[Any]=4_0 , __a : str=2 , __a : Union[str, Any]=1 , __a : int=0 , ) -> List[str]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = pad_token_id
__UpperCAmelCase = bos_token_id
def snake_case__ ( self : Optional[int] ) -> str:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase = prepare_pegasus_inputs_dict(__a , __a , __a )
return config, inputs_dict
def snake_case__ ( self : Optional[int] , __a : Optional[int] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFPegasusModel(config=__a ).get_decoder()
__UpperCAmelCase = inputs_dict['''input_ids''']
__UpperCAmelCase = input_ids[:1, :]
__UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
__UpperCAmelCase = inputs_dict['''head_mask''']
__UpperCAmelCase = 1
# first forward pass
__UpperCAmelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
__UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
__UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCAmelCase = model(__a , attention_mask=__a )[0]
__UpperCAmelCase = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
__UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , ):
"""simple docstring"""
if attention_mask is None:
__UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
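# Note: in the helper above, the encoder attention mask marks non-pad tokens,
# the decoder mask always keeps position 0 (the forced start token) and masks
# pad tokens elsewhere, and the head masks default to all-ones (no pruning).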
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
a_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
a_ = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ = True
a_ = False
a_ = False
def snake_case__ ( self : int ) -> Optional[Any]:
__UpperCAmelCase = TFPegasusModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
@require_sentencepiece
@require_tokenizers
@require_tf
class A ( unittest.TestCase ):
a_ = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
a_ = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
a_ = '''google/pegasus-xsum'''
@cached_property
def snake_case__ ( self : str ) -> Any:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def snake_case__ ( self : Dict ) -> Tuple:
__UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def snake_case__ ( self : Optional[int] , **__a : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase = self.translate_src_text(**__a )
assert self.expected_text == generated_words
def snake_case__ ( self : str , **__a : List[str] ) -> str:
__UpperCAmelCase = self.tokenizer(self.src_text , **__a , padding=__a , return_tensors='''tf''' )
__UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , )
__UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )
return generated_words
@slow
def snake_case__ ( self : int ) -> Tuple:
self._assert_generated_batch_equal_expected()
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
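# Illustrative sketch (hypothetical usage of the upstream BertGenerationConfig):
#   config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4)
#   config.to_dict()["hidden_size"]  # -> 256; round-trips via from_dict()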
| 654 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase : str = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[Any] , __a : Path , __a : Union[str, None] = None , __a : Union[List[str], None] = None , __a : Union[str, List[str], None] = None , __a : bool = True , ) -> Union[str, Any]:
__UpperCAmelCase = [file for file in os.listdir(__a ) if os.path.isfile(os.path.join(__a , __a ) )]
if identifier is not None:
__UpperCAmelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__a , __a ):
for n_ in n_identifier:
__UpperCAmelCase = [file for file in files if n_ not in file]
else:
__UpperCAmelCase = [file for file in files if n_identifier not in file]
__UpperCAmelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
__UpperCAmelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , __a )
if only_modules:
__UpperCAmelCase = file.split('''.''' )[0]
try:
__UpperCAmelCase = getattr(__a , __a )
__UpperCAmelCase = doctest.DocTestSuite(__a )
__UpperCAmelCase = unittest.TextTestRunner().run(__a )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
__UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def snake_case__ ( self : List[str] ) -> str:
__UpperCAmelCase = Path('''src/transformers''' )
__UpperCAmelCase = '''modeling'''
__UpperCAmelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(__a , identifier=__a , ignore_files=__a )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase = Path('''src/transformers''' )
__UpperCAmelCase = '''tokenization'''
self.analyze_directory(__a , identifier=__a )
def snake_case__ ( self : List[Any] ) -> List[str]:
__UpperCAmelCase = Path('''src/transformers''' )
__UpperCAmelCase = '''configuration'''
self.analyze_directory(__a , identifier=__a )
def snake_case__ ( self : Optional[int] ) -> int:
__UpperCAmelCase = Path('''src/transformers''' )
__UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(__a , n_identifier=__a )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase = Path('''docs/source''' )
__UpperCAmelCase = ['''favicon.ico''']
self.analyze_directory(__a , ignore_files=__a , only_modules=__a )
| 719 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458
# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = symbols("ct x y z")
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
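# Worked check (illustrative): at v = 0.6c, beta(v) = 0.6 and
# gamma(v) = 1 / sqrt(1 - 0.6**2) = 1.25, so a moving clock ticks 1.25x slower.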
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
"""simple docstring"""
# Ensure event is not empty
if event is None:
__UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowerCAmelCase : Dict = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
__lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
"nielsr/canine-s": 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
__lowerCAmelCase : str = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Optional[int] = 0Xe000
__lowerCAmelCase : Optional[int] = 0Xe001
__lowerCAmelCase : Optional[int] = 0Xe002
__lowerCAmelCase : Dict = 0Xe003
__lowerCAmelCase : Optional[int] = 0Xe004
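# In the upstream source these six constants are, in order: PAD = 0,
# CLS = 0xE000, SEP = 0xE001, BOS = 0xE002, MASK = 0xE003 and RESERVED = 0xE004,
# i.e. the names used as keys in SPECIAL_CODEPOINTS below.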
# Maps special codepoints to human-readable names.
__lowerCAmelCase : Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
__lowerCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A ( UpperCAmelCase ):
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[str] , __a : Union[str, Any]=chr(__a ) , __a : Optional[int]=chr(__a ) , __a : Union[str, Any]=chr(__a ) , __a : List[Any]=chr(__a ) , __a : Any=chr(__a ) , __a : Tuple=chr(__a ) , __a : Union[str, Any]=False , __a : Optional[int]=2_0_4_8 , **__a : str , ) -> Tuple:
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , model_max_length=__a , **__a , )
# Creates a mapping for looking up the IDs of special symbols.
__UpperCAmelCase = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
__UpperCAmelCase = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
__UpperCAmelCase = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
__UpperCAmelCase = UNICODE_VOCAB_SIZE
__UpperCAmelCase = len(self._special_codepoints )
@property
def snake_case__ ( self : Union[str, Any] ) -> int:
return self._unicode_vocab_size
def snake_case__ ( self : str , __a : str ) -> List[str]:
return list(__a )
def snake_case__ ( self : Optional[int] , __a : str ) -> int:
try:
return ord(__a )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def snake_case__ ( self : List[str] , __a : int ) -> str:
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__a )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def snake_case__ ( self : Any , __a : Union[str, Any] ) -> Optional[int]:
return "".join(__a )
def snake_case__ ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
__UpperCAmelCase = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def snake_case__ ( self : int , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
__UpperCAmelCase = [1] + ([0] * len(__a )) + [1]
if token_ids_a is not None:
result += ([0] * len(__a )) + [1]
return result
def snake_case__ ( self : Optional[int] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
__UpperCAmelCase = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def snake_case__ ( self : Optional[Any] , __a : str , __a : Optional[str] = None ) -> Any:
return ()
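# Illustrative sketch (hypothetical usage of the upstream CanineTokenizer):
# tokenization is per Unicode codepoint, so ids are just ord() values plus the
# special codepoints above, e.g.
#   CanineTokenizer()("hi").input_ids  ->  [0xE000, 104, 105, 0xE001]  # [CLS] h i [SEP]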
| 720 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
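# Note: the queue above is a plain binary min-heap keyed by priority; put() on
# an existing item and remove_element() both pop entries until the target is
# found and re-push the rest, so updates cost O(n log n) rather than O(log n).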
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
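# Note: the priority above is the weighted A* evaluation f(s) = g(s) + Wa * h_i(s),
# where h_i is the i-th heuristic and the weight Wa inflates the heuristic to
# trade optimality for search speed.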
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((__UpperCAmelCase) , (__UpperCAmelCase)) = s
__UpperCAmelCase = (x - 1, y)
__UpperCAmelCase = (x + 1, y)
__UpperCAmelCase = (x, y + 1)
__UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
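# Upstream these assignments are: the two heuristic weights (both 1), the grid
# size n = 20, n_heuristic = 3, the start/goal corners of the grid, and the
# global round counter t = 1 incremented inside multi_a_star.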
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 721 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
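# Example (illustrative): applied to "TFBertForMaskedLM", the regex above
# yields ["TF", "Bert", "For", "Masked", "LM"], splitting at case boundaries.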
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 654 | 0 |
from __future__ import annotations
from statistics import mean
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
__UpperCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i]
__UpperCAmelCase = []
__UpperCAmelCase = 0
__UpperCAmelCase = 0
    # While processes remain uncompleted: every process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process, and the shortest job in ready_process (target_process)
    # is selected and run to completion.
while completed != no_of_processes:
__UpperCAmelCase = []
__UpperCAmelCase = -1
for i in range(UpperCamelCase__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__UpperCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__UpperCAmelCase = 0
__UpperCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
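# Worked example (illustrative), matching the first test case below: with burst
# times [2, 5, 3, 7] all arriving at t=0, the shortest ready job runs to
# completion each round (P1, P3, P2, P4), giving waiting times [0, 5, 2, 10]
# and turnaround times [2, 10, 5, 17].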
if __name__ == "__main__":
print("[TEST CASE 01]")
__lowerCAmelCase : List[Any] = 4
__lowerCAmelCase : List[Any] = [2, 5, 3, 7]
__lowerCAmelCase : Tuple = [0, 0, 0, 0]
__lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase : Dict = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 700 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
def wrapper(*UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[Any] ):
__UpperCAmelCase = timeit.default_timer()
__UpperCAmelCase = func(*UpperCamelCase__ , **UpperCamelCase__ )
__UpperCAmelCase = timeit.default_timer() - starttime
return delta
__UpperCAmelCase = func.__name__
return wrapper
def lowerCAmelCase ( UpperCamelCase__ : dict , UpperCamelCase__ : str=1_0_0 , UpperCamelCase__ : Union[str, Any]=None ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = seq_shapes or {}
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(UpperCamelCase__ , _ArrayXD ):
__UpperCAmelCase = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(UpperCamelCase__ , datasets.Value ):
if v.dtype == "string":
__UpperCAmelCase = '''The small grey turtle was surprisingly fast when challenged.'''
else:
__UpperCAmelCase = np.random.randint(1_0 , size=1 ).astype(v.dtype ).item()
elif isinstance(UpperCamelCase__ , datasets.Sequence ):
while isinstance(UpperCamelCase__ , datasets.Sequence ):
__UpperCAmelCase = v.feature
__UpperCAmelCase = seq_shapes[k]
__UpperCAmelCase = np.random.rand(*UpperCamelCase__ ).astype(v.dtype )
__UpperCAmelCase = data
dummy_data.append((i, example) )
return dummy_data
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=1_0_0 , UpperCamelCase__ : Optional[Any]=None ):
"""simple docstring"""
__UpperCAmelCase = generate_examples(UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes=UpperCamelCase__ )
with ArrowWriter(features=UpperCamelCase__ , path=UpperCamelCase__ ) as writer:
for key, record in dummy_data:
__UpperCAmelCase = features.encode_example(UpperCamelCase__ )
writer.write(UpperCamelCase__ )
__UpperCAmelCase , __UpperCAmelCase = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
__UpperCAmelCase = datasets.Dataset.from_file(filename=UpperCamelCase__ , info=datasets.DatasetInfo(features=UpperCamelCase__ ) )
return dataset
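# Hedged, self-contained sketch (feature names and the temporary path are
# illustrative): the ArrowWriter round-trip the benchmark above times: encode
# examples against a Features spec, write an Arrow file, then reload it.
import tempfile
import datasets
from datasets.arrow_writer import ArrowWriter
features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
with tempfile.NamedTemporaryFile(suffix=".arrow", delete=False) as tmp:
    path = tmp.name
with ArrowWriter(features=features, path=path) as writer:
    for i in range(3):
        writer.write(features.encode_example({"text": f"example {i}", "label": i % 2}))
    num_examples, num_bytes = writer.finalize()
reloaded = datasets.Dataset.from_file(filename=path, info=datasets.DatasetInfo(features=features))
assert len(reloaded) == num_examples == 3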
| 701 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
__UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__UpperCAmelCase = in_file.readlines()
__UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase = []
for obj_list in obj_lists:
__UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for idx in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = []
__UpperCAmelCase = img_list[idx]
path_list.append(UpperCamelCase__ )
__UpperCAmelCase = anno_list[idx]
__UpperCAmelCase = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
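# Hedged sketch: the observable effect of the lazy-module pattern above.
# `import transformers` stays cheap, and the first attribute access triggers
# the real submodule import; this uses only the public transformers API.
import transformers
config = transformers.LlamaConfig()  # submodule import happens here, lazily
assert config.model_type == "llama"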
| 702 |
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = Path(UpperCamelCase__ )
__UpperCAmelCase = Path(UpperCamelCase__ )
dest_dir.mkdir(exist_ok=UpperCamelCase__ )
for path in src_dir.iterdir():
__UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase = dest_dir.joinpath(path.name )
print(UpperCamelCase__ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )
if __name__ == "__main__":
fire.Fire(minify)
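# Hedged, self-contained sketch (names are illustrative): the truncation logic
# above exercised on a throwaway directory, keeping the first 3 lines per file.
import tempfile
from pathlib import Path
with tempfile.TemporaryDirectory() as root:
    src, dest = Path(root, "src"), Path(root, "dest")
    src.mkdir()
    src.joinpath("train.source").write_text("\n".join(f"line {i}" for i in range(10)))
    dest.mkdir(exist_ok=True)
    for path in src.iterdir():
        lines = [x.rstrip() for x in path.open().readlines()][:3]
        dest.joinpath(path.name).open("w").write("\n".join(lines))
    assert dest.joinpath("train.source").read_text() == "line 0\nline 1\nline 2"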
| 654 | 0 |
'''simple docstring'''
__lowerCAmelCase : str = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__lowerCAmelCase : Optional[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
__lowerCAmelCase : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 703 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
__UpperCAmelCase = 1
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
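# Hedged worked example (not in the original row): the recurrence implemented
# above, C(i) = C(i-1) * (4*i - 2) // (i + 1), generates the Catalan numbers.
current = 1
computed = [current]
for i in range(1, 6):
    current = current * (4 * i - 2) // (i + 1)
    computed.append(current)
assert computed == [1, 1, 2, 5, 14, 42]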
| 654 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class A ( unittest.TestCase ):
def __init__( self : Tuple , __a : Dict , __a : Optional[int]=2 , __a : List[Any]=5_6 , __a : int=True , __a : Union[str, Any]=True , __a : str=True , __a : Optional[int]=True , __a : Tuple=9_9 , __a : List[Any]=3_2 , __a : Optional[Any]=2 , __a : str=2 , __a : Any=7 , __a : Union[str, Any]="gelu_new" , __a : str=0.1 , __a : Optional[Any]=0.1 , __a : List[str]=5_1_2 , __a : Union[str, Any]=1_6 , __a : Tuple=2 , __a : Any=0.0_2 , __a : List[str]=4 , __a : Union[str, Any]="block_sparse" , __a : Optional[Any]=True , __a : Any=False , __a : List[Any]=2 , __a : Dict=3 , ) -> Tuple:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_attention_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_choices
__UpperCAmelCase = rescale_embeddings
__UpperCAmelCase = attention_type
__UpperCAmelCase = use_bias
__UpperCAmelCase = block_size
__UpperCAmelCase = num_random_blocks
def snake_case__ ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_attention_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : Any ) -> Dict:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : int ) -> str:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : Union[str, Any] ) -> str:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : Any ) -> Tuple:
super().test_hidden_states_output()
@slow
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(__a )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = self._prepare_for_class(__a , __a )
__UpperCAmelCase = model_class(__a )
@jax.jit
def model_jitted(__a : Any , __a : Any=None , **__a : List[str] ):
return model(input_ids=__a , attention_mask=__a , **__a )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = model_jitted(**__a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Dict , __a : Optional[int] , __a : Tuple=1e-5 , __a : Tuple="outputs" , __a : List[str]=None ) -> List[Any]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(__a , __a , __a , __a , __a , __a )
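# Hedged sketch of the JIT-consistency pattern used in the test above: run the
# same function jitted and with JIT disabled, then require matching shapes and
# numerically close values.
import jax
import jax.numpy as jnp
def fn(x):
    return jnp.tanh(x) * 2.0
x = jnp.ones((4, 8))
jitted_out = jax.jit(fn)(x)
with jax.disable_jit():
    eager_out = fn(x)
assert jitted_out.shape == eager_out.shape
assert jnp.allclose(jitted_out, eager_out, atol=1e-6)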
| 704 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
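# Hedged sketch of `find_executable_batch_size` outside the test harness: the
# decorated function takes `batch_size` first, the caller omits it, and the
# decorator halves the batch size on every (simulated) CUDA OOM.
from accelerate.utils.memory import find_executable_batch_size
def train(starting_batch_size: int = 128) -> int:
    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inner(batch_size: int) -> int:
        if batch_size > 16:  # stand-in for "this batch size does not fit"
            raise RuntimeError("CUDA out of memory.")
        return batch_size  # a real loop would train the model here
    return inner()
assert train() == 16  # retries: 128 -> 64 -> 32 -> 16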
| 654 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 705 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''enter the numbers of values: ''' ) )
__UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
__UpperCAmelCase = 0
print('''enter the values of parameters in a list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
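# Hedged worked example (illustrative data): Newton's forward-difference
# interpolation as implemented above. With equally spaced x and y = x**2,
# interpolating at 1.5 must recover 2.25 exactly, since the data is quadratic.
import math
x = [0.0, 1.0, 2.0]
y = [0.0, 1.0, 4.0]
u = (1.5 - x[0]) / (x[1] - x[0])
table = [y[:]]  # forward-difference table, one column per order
for order in range(1, len(x)):
    prev = table[-1]
    table.append([prev[j + 1] - prev[j] for j in range(len(prev) - 1)])
result = table[0][0]
u_term = 1.0
for order in range(1, len(x)):
    u_term *= u - (order - 1)  # u, u*(u-1), u*(u-1)*(u-2), ...
    result += u_term * table[order][0] / math.factorial(order)
assert abs(result - 2.25) < 1e-9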
| 654 | 0 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
__lowerCAmelCase : Any = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
__lowerCAmelCase : Dict = {
"jukebox": 512,
}
class A ( UpperCAmelCase ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_LYRIC_TOKENS_SIZES
a_ = ['''input_ids''', '''attention_mask''']
def __init__( self : int , __a : str , __a : Optional[int] , __a : Tuple , __a : Union[str, Any]=["v3", "v2", "v2"] , __a : List[Any]=5_1_2 , __a : Tuple=5 , __a : List[Any]="<|endoftext|>" , **__a : Optional[Any] , ) -> Any:
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
super().__init__(
unk_token=__a , n_genres=__a , version=__a , max_n_lyric_tokens=__a , **__a , )
__UpperCAmelCase = version
__UpperCAmelCase = max_n_lyric_tokens
__UpperCAmelCase = n_genres
with open(__a , encoding='''utf-8''' ) as vocab_handle:
__UpperCAmelCase = json.load(__a )
with open(__a , encoding='''utf-8''' ) as vocab_handle:
__UpperCAmelCase = json.load(__a )
with open(__a , encoding='''utf-8''' ) as vocab_handle:
__UpperCAmelCase = json.load(__a )
__UpperCAmelCase = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
        # In v2 the vocabulary had n_vocab=80 characters; v3 dropped '+', leaving n_vocab=79.
if len(self.lyrics_encoder ) == 7_9:
__UpperCAmelCase = oov.replace(r'''\-\'''' , r'''\-+\'''' )
__UpperCAmelCase = regex.compile(__a )
__UpperCAmelCase = {v: k for k, v in self.artists_encoder.items()}
__UpperCAmelCase = {v: k for k, v in self.genres_encoder.items()}
__UpperCAmelCase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def snake_case__ ( self : Optional[int] ) -> List[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def snake_case__ ( self : Dict ) -> Optional[Any]:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def snake_case__ ( self : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[int] ) -> List[Any]:
__UpperCAmelCase = [self.artists_encoder.get(__a , 0 ) for artist in list_artists]
for genres in range(len(__a ) ):
__UpperCAmelCase = [self.genres_encoder.get(__a , 0 ) for genre in list_genres[genres]]
__UpperCAmelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__UpperCAmelCase = [[self.lyrics_encoder.get(__a , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def snake_case__ ( self : List[str] , __a : List[Any] ) -> Optional[int]:
return list(__a )
def snake_case__ ( self : Union[str, Any] , __a : Union[str, Any] , __a : Any , __a : str , **__a : Optional[Any] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.prepare_for_tokenization(__a , __a , __a )
__UpperCAmelCase = self._tokenize(__a )
return artist, genre, lyrics
def snake_case__ ( self : int , __a : str , __a : str , __a : str , __a : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__UpperCAmelCase = artists[idx].lower()
__UpperCAmelCase = [genres[idx].lower()]
else:
__UpperCAmelCase = self._normalize(artists[idx] ) + '''.v2'''
__UpperCAmelCase = [
self._normalize(__a ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__UpperCAmelCase = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
__UpperCAmelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
__UpperCAmelCase = {vocab[index]: index + 1 for index in range(len(__a ) )}
__UpperCAmelCase = 0
__UpperCAmelCase = len(__a ) + 1
__UpperCAmelCase = self.vocab
__UpperCAmelCase = {v: k for k, v in self.vocab.items()}
__UpperCAmelCase = ''''''
else:
__UpperCAmelCase = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
__UpperCAmelCase = self._run_strip_accents(__a )
__UpperCAmelCase = lyrics.replace('''\\''' , '''\n''' )
__UpperCAmelCase = self.out_of_vocab.sub('''''' , __a ), [], []
return artists, genres, lyrics
def snake_case__ ( self : Optional[int] , __a : Tuple ) -> Optional[int]:
__UpperCAmelCase = unicodedata.normalize('''NFD''' , __a )
__UpperCAmelCase = []
for char in text:
__UpperCAmelCase = unicodedata.category(__a )
if cat == "Mn":
continue
output.append(__a )
return "".join(__a )
def snake_case__ ( self : int , __a : str ) -> str:
__UpperCAmelCase = (
[chr(__a ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(__a ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(__a ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
__UpperCAmelCase = frozenset(__a )
__UpperCAmelCase = re.compile(r'''_+''' )
__UpperCAmelCase = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
__UpperCAmelCase = pattern.sub('''_''' , __a ).strip('''_''' )
return text
def snake_case__ ( self : Union[str, Any] , __a : List[str] ) -> str:
return " ".join(__a )
def snake_case__ ( self : List[str] , __a : Dict , __a : Optional[Union[str, TensorType]] = None , __a : bool = False ) -> Union[str, Any]:
# Convert to TensorType
if not isinstance(__a , __a ):
__UpperCAmelCase = TensorType(__a )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
__UpperCAmelCase = tf.constant
__UpperCAmelCase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
__UpperCAmelCase = torch.tensor
__UpperCAmelCase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
__UpperCAmelCase = jnp.array
__UpperCAmelCase = _is_jax
else:
__UpperCAmelCase = np.asarray
__UpperCAmelCase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__UpperCAmelCase = [inputs]
if not is_tensor(__a ):
__UpperCAmelCase = as_tensor(__a )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self : Dict , __a : Optional[int] , __a : Union[str, Any] , __a : int="" , __a : Dict="pt" ) -> BatchEncoding:
__UpperCAmelCase = [0, 0, 0]
__UpperCAmelCase = [artist] * len(self.version )
__UpperCAmelCase = [genres] * len(self.version )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.tokenize(__a , __a , __a )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._convert_token_to_id(__a , __a , __a )
__UpperCAmelCase = [-INFINITY] * len(full_tokens[-1] )
__UpperCAmelCase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__a )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def snake_case__ ( self : Tuple , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=__a ) )
__UpperCAmelCase = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=__a ) )
__UpperCAmelCase = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__a ) )
return (artists_file, genres_file, lyrics_file)
def snake_case__ ( self : List[Any] , __a : Dict , __a : str , __a : str ) -> str:
__UpperCAmelCase = self.artists_decoder.get(__a )
__UpperCAmelCase = [self.genres_decoder.get(__a ) for genre in genres_index]
__UpperCAmelCase = [self.lyrics_decoder.get(__a ) for character in lyric_index]
return artist, genres, lyrics
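# Hedged sketch (standalone re-implementation, not a call into the class):
# the v2 artist/genre normalization above keeps [a-z0-9.], maps every other
# character to '_', and collapses and strips runs of underscores.
import re
def normalize_name(text: str) -> str:
    accepted = frozenset("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", text).strip("_")
assert normalize_name("The Beatles!") == "the_beatles"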
| 706 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
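# Hedged sketch: the fused-QKV split performed in read_in_q_k_v above, where a
# (3 * hidden, hidden) projection weight is cut into query/key/value blocks.
import torch
hidden = 4
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : hidden * 2, :]
v = in_proj_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)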
| 654 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class A ( UpperCAmelCase ):
a_ = '''audio-spectrogram-transformer'''
def __init__( self : str , __a : Optional[Any]=7_6_8 , __a : List[Any]=1_2 , __a : Optional[Any]=1_2 , __a : int=3_0_7_2 , __a : Optional[Any]="gelu" , __a : Optional[Any]=0.0 , __a : int=0.0 , __a : int=0.0_2 , __a : Optional[Any]=1e-12 , __a : Optional[Any]=1_6 , __a : Optional[int]=True , __a : int=1_0 , __a : Any=1_0 , __a : int=1_0_2_4 , __a : Tuple=1_2_8 , **__a : str , ) -> str:
super().__init__(**__a )
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = patch_size
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = frequency_stride
__UpperCAmelCase = time_stride
__UpperCAmelCase = max_length
__UpperCAmelCase = num_mel_bins
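# Hedged usage sketch: instantiating the config above as with any
# PretrainedConfig subclass (assumes the public transformers import path).
from transformers import ASTConfig
config = ASTConfig(num_mel_bins=128, max_length=1024)
assert config.model_type == "audio-spectrogram-transformer"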
| 707 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = '''bf16''' if fpaa else None
__UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='''unet''' , dtype=__a , revision=__a )
return model, params
def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
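# Hedged sketch of the slice-comparison pattern above: compare only a small,
# deterministic slice of a large output tensor against hard-coded values.
import jax.numpy as jnp
sample = jnp.arange(4 * 4 * 8 * 8, dtype=jnp.float32).reshape(4, 4, 8, 8) / 100.0
output_slice = jnp.asarray(sample[-1, -2:, -2:, :2].flatten(), dtype=jnp.float32)
assert output_slice.shape == (8,)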
| 654 | 0 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = os.path.abspath(UpperCamelCase__ )
logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
__UpperCAmelCase = tf.train.list_variables(UpperCamelCase__ )
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__UpperCAmelCase = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(f"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
__UpperCAmelCase = name[1:]
# figure out how many levels deep the name is
__UpperCAmelCase = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(UpperCamelCase__ )
# read data
__UpperCAmelCase = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
names.append('''/'''.join(UpperCamelCase__ ) )
arrays.append(UpperCamelCase__ )
logger.info(f"""Read a total of {len(UpperCamelCase__ ):,} layers""" )
# Sanity check
if len(set(UpperCamelCase__ ) ) != 1:
raise ValueError(f"""Found layer names with different depths (layer depth {list(set(UpperCamelCase__ ) )})""" )
__UpperCAmelCase = list(set(UpperCamelCase__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = full_name.split('''/''' )
__UpperCAmelCase = model
__UpperCAmelCase = []
for i, m_name in enumerate(UpperCamelCase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
__UpperCAmelCase = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''embeddings''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''encoder''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''layer''' )
__UpperCAmelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''pooler''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''token_type_embeddings''' )
else:
raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''attention''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''attention''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''output''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''attention''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''output''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''output''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''output''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''intermediate''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
__UpperCAmelCase = getattr(UpperCamelCase__ , '''weight''' )
else:
logger.warning(f"""Ignored {m_name}""" )
# for certain layers reshape is necessary
__UpperCAmelCase = '''.'''.join(UpperCamelCase__ )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , UpperCamelCase__ ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , UpperCamelCase__ ):
__UpperCAmelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__UpperCAmelCase = array.transpose()
if pointer.shape == array.shape:
__UpperCAmelCase = torch.from_numpy(UpperCamelCase__ )
else:
raise ValueError(
f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
f""" {array.shape}""" )
logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ):
"""simple docstring"""
logger.info(f"""Loading model based on config from {config_path}...""" )
__UpperCAmelCase = BertConfig.from_json_file(UpperCamelCase__ )
__UpperCAmelCase = BertModel(UpperCamelCase__ )
# Load weights from checkpoint
logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
__lowerCAmelCase : List[str] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
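# Hedged sketch: the kernel-transpose rule applied near the end of the
# conversion above. TF stores dense kernels as (in, out); torch.nn.Linear
# stores weights as (out, in), so kernels are transposed when copied.
import numpy as np
import torch
tf_kernel = np.arange(6, dtype=np.float32).reshape(2, 3)  # (in_features, out_features)
linear = torch.nn.Linear(2, 3, bias=False)  # weight shape: (out_features, in_features)
weight = torch.from_numpy(tf_kernel.transpose())
assert weight.shape == linear.weight.shape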
| 708 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
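# Quick illustration (not part of the original script) of the version
# arithmetic above, using `packaging.version` on a made-up version string:
#   >>> import packaging.version
#   >>> v = packaging.version.parse("4.26.0.dev0")
#   >>> v.is_devrelease, v.base_version
#   (True, '4.26.0')
#   >>> f"{v.major}.{v.minor + 1}.0.dev0"
#   '4.27.0.dev0'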
| 654 | 0 |
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase ):
def snake_case__ ( self : str ) -> List[str]:
__UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__UpperCAmelCase = Rectangle(height=0.2_5 , width=0.2_5 )
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
__UpperCAmelCase = Text('''CPU''' , font_size=2_4 )
__UpperCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
__UpperCAmelCase = [mem.copy() for i in range(4 )]
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = Text('''GPU''' , font_size=2_4 )
__UpperCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = Text('''Model''' , font_size=2_4 )
__UpperCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
__UpperCAmelCase = []
__UpperCAmelCase = []
for i, rect in enumerate(__a ):
__UpperCAmelCase = fill.copy().set_fill(__a , opacity=0.8 )
target.move_to(__a )
model_arr.append(__a )
__UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__a )
self.add(*__a , *__a )
__UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
__UpperCAmelCase = Text('''Disk''' , font_size=2_4 )
__UpperCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
disk.move_to([-4, -1.2_5, 0] )
self.add(__a , __a )
__UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
__UpperCAmelCase = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__a )
__UpperCAmelCase = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) )
__UpperCAmelCase = Square(0.3 )
input.set_fill(__a , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __a , buff=0.5 )
self.play(Write(__a ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__a , buff=0.0_2 )
self.play(MoveToTarget(__a ) )
self.play(FadeOut(__a ) )
__UpperCAmelCase = Arrow(start=__a , end=__a , color=__a , buff=0.5 )
a.next_to(model_arr[0].get_left() , __a , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__UpperCAmelCase = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=3 ) )
__UpperCAmelCase = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.0_2}
self.play(
Write(__a ) , Circumscribe(model_arr[0] , color=__a , **__a ) , Circumscribe(model_cpu_arr[0] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__UpperCAmelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , __a , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
__UpperCAmelCase = AnimationGroup(
FadeOut(__a , run_time=0.5 ) , MoveToTarget(__a , run_time=0.5 ) , FadeIn(__a , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__UpperCAmelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **__a ) , Circumscribe(cpu_left_col_base[i] , **__a ) , Circumscribe(cpu_left_col_base[i + 1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , Circumscribe(model_arr[i + 1] , color=__a , **__a ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__a , **__a ) , Circumscribe(cpu_left_col_base[-1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__UpperCAmelCase = a_c
__UpperCAmelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(__a ) , FadeOut(__a , run_time=0.5 ) , )
__UpperCAmelCase = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=3 ) , MoveToTarget(__a ) )
self.wait()
| 709 |
'''simple docstring'''
def counting_sort ( collection : list ):
    """simple docstring"""
    # if the collection is empty, return an empty list
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string ( string : str ):
    """simple docstring"""
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__lowerCAmelCase : List[Any] = "hf-internal-testing/tiny-random-bert"
__lowerCAmelCase : int = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__lowerCAmelCase : List[str] = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests ( unittest.TestCase ):
    def test_cached_file( self ) -> None:
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , '''refs''' , '''main''' ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , '''snapshots''' , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )
        # File is cached at the same place the second time.
        new_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(archive_file , new_file )
        # Using a specific revision to test the full commit hash.
        new_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision='''9b8c223''' )
        self.assertEqual(new_file , os.path.join(CACHE_DIR , '''snapshots''' , FULL_COMMIT_HASH , CONFIG_NAME ) )
    def test_cached_file_errors( self ) -> None:
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid model identifier''' ):
            _ = cached_file('''tiny-random-bert''' , CONFIG_NAME )
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid git identifier''' ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision='''aaaa''' )
        with self.assertRaisesRegex(EnvironmentError , '''does not appear to have a file named''' ):
            _ = cached_file(RANDOM_BERT , '''conf''' )
    def test_non_existence_is_cached( self ) -> None:
        with self.assertRaisesRegex(EnvironmentError , '''does not appear to have a file named''' ):
            _ = cached_file(RANDOM_BERT , '''conf''' )
        with open(os.path.join(CACHE_DIR , '''refs''' , '''main''' ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , '''.no_exist''' , main_commit , '''conf''' ) ) )
        path = cached_file(RANDOM_BERT , '''conf''' , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        path = cached_file(RANDOM_BERT , '''conf''' , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , '''conf''' , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
            # This checks that we did call the fake head request
            mock_head.assert_called()
    def test_has_file( self ) -> None:
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant( self ) -> None:
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , CONFIG_NAME )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , CONFIG_NAME , revision='''ahaha''' )
        resolved_file = get_file_from_repo('''bert-base-cased''' , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_6_8 )
    def test_get_file_from_repo_local( self ) -> None:
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , '''a.txt''' ) , str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir , '''b.txt''' ) )
| 710 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price ( symbol : str = "AAPL" ):
    """simple docstring"""
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
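# NOTE (added): the CSS class above matches a past Yahoo Finance page layout;
# scraping like this is brittle, and the selector will need updating whenever
# the page markup changes.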
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 0 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset ( Dataset ):
    def __init__( self , length : int = 1_0_1 ) -> None:
        self.length = length
    def __len__( self ) -> int:
        return self.length
    def __getitem__( self , i ) -> int:
        return i
class DummyDataCollator:
    def __call__( self , features ) -> dict:
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}
class DummyModel ( nn.Module ):
    def __init__( self ) -> None:
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(1_2_0 , 8_0 )
    def forward( self , input_ids , labels=None ):
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore ( TestCasePlus ):
    @require_torch_neuroncore
    def test_trainer( self ) -> None:
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed ( TestCasePlus ):
    @require_torch_multi_gpu
    def test_trainer( self ) -> None:
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics ( p : EvalPrediction ) -> Dict:
            """simple docstring"""
            sequential = list(range(len(dataset ) ) )
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    '''Predictions and/or labels do not match expected results:\n  - predictions: '''
                    f"""{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}""" )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
| 711 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime ( arrival_time : list[int] , burst_time : list[int] , no_of_processes : int ):
    """simple docstring"""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not all completed: a process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process; the shortest process in ready_process (target_process)
    # is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime ( burst_time : list[int] , no_of_processes : int , waiting_time : list[int] ):
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
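# Hand trace (illustrative): arrival_time = [0, 0], burst_time = [2, 1]
#   t=0: ready_process = [0, 1]; process 1 has the shorter burst, so it runs to
#        completion -> total_time = 1, waiting_time[1] = 1 - 0 - 1 = 0
#   t=1: ready_process = [0]; process 0 runs -> total_time = 3,
#        waiting_time[0] = 3 - 0 - 2 = 1
# Despite the `remaining_time` bookkeeping, a selected process always runs to
# completion, so this behaves as non-preemptive shortest-job-first.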
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase ( UpperCamelCase__ : Tuple="ro" , UpperCamelCase__ : Any="en" , UpperCamelCase__ : Dict="wmt16" , UpperCamelCase__ : Dict=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__UpperCAmelCase = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__UpperCAmelCase = datasets.load_dataset(UpperCamelCase__ , UpperCamelCase__ )
if save_dir is None:
__UpperCAmelCase = f"""{dataset}-{pair}"""
__UpperCAmelCase = Path(UpperCamelCase__ )
save_dir.mkdir(exist_ok=UpperCamelCase__ )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__UpperCAmelCase = '''val''' if split == '''validation''' else split
__UpperCAmelCase = save_dir.joinpath(f"""{fn}.source""" )
__UpperCAmelCase = save_dir.joinpath(f"""{fn}.target""" )
__UpperCAmelCase = src_path.open('''w+''' )
__UpperCAmelCase = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__UpperCAmelCase = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
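# Example invocation (hypothetical file name; fire exposes the function's
# keyword arguments as CLI flags):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en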
| 712 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
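# Every placeholder class below follows the same pattern: it stands in for a
# torch-backed object when torch is not installed, and instantiating it (or
# calling either classmethod, typically `from_config`/`from_pretrained`) raises
# an informative ImportError through `requires_backends`, along the lines of:
#   "<name> requires the PyTorch library but it was not found in your environment."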
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
| 654 | 0 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers ( remaining_length : int , remainder : int , digits : list[int] , length : int ):
    """simple docstring"""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 1_0
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(1_0 ):
            # the middle digit (assignment target reconstructed; it was missing
            # from the source)
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 1_0 , digits , length )
        return result
    result = 0
    for digita in range(1_0 ):
        # fill the outermost unfilled pair of positions, working inwards
        # (indices reconstructed)
        digits[(length + remaining_length) // 2 - 1] = digita
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digita + digitb) // 1_0 , digits , length , )
    return result
def solution ( max_power : int = 9 ):
    """simple docstring"""
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
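# Sanity check from the Project Euler 145 statement: there are exactly 120
# reversible numbers below one thousand, so solution(3) should return 120.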
if __name__ == "__main__":
print(F"""{solution() = }""")
| 713 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
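# Minimal usage sketch (assumes network access to the Hugging Face Hub):
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   input_ids = tokenizer("Kraków jest pięknym miastem.")["input_ids"]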
| 714 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
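# Sketch of the lazy-import behaviour (illustrative): an attribute access such as
#   from transformers.models.llama import LlamaConfig
# triggers `_LazyModule` to import only `configuration_llama`; the heavier
# `modeling_llama` submodule is loaded on first use, and only when torch is
# available.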
| 654 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig ( PretrainedConfig ):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=4_0_9_6 , intermediate_size=1_1_0_0_8 , num_hidden_layers=3_2 , num_attention_heads=3_2 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2_0_4_8 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 715 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds ( train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
    """simple docstring"""
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
| 654 | 0 |
def lowerCAmelCase ( UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 7 , UpperCamelCase__ : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
for current_denominator in range(1 , limit + 1 ):
__UpperCAmelCase = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__UpperCAmelCase = current_numerator
__UpperCAmelCase = current_denominator
return max_numerator
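# A readable sketch of the search above, assuming the `__UpperCAmelCase`
# placeholders stand for max_numerator / max_denominator / current_numerator.
# For each denominator d up to the limit it takes the largest numerator n with
# n/d strictly below numerator/denominator and keeps the best fraction found.
def solution_sketch(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    max_numerator, max_denominator = 0, 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # n/d would equal the target fraction exactly, so step one back
            current_numerator -= 1
        # cross-multiplied comparison of n/d against the running maximum
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator, max_denominator = current_numerator, current_denominator
    return max_numerator
assert solution_sketch(3, 7, 8) == 2  # 2/5 is the closest fraction below 3/7 with d <= 8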
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 716 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 654 | 0 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Optional[Any] = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class A ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Any , __a : str , __a : Optional[Any] , __a : str=None , __a : Union[str, Any]=1 ) -> List[str]:
__UpperCAmelCase = tokenizer
__UpperCAmelCase = dataset
__UpperCAmelCase = len(__a ) if n_tasks is None else n_tasks
__UpperCAmelCase = n_copies
def __iter__( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
__UpperCAmelCase = self.tokenizer(__a , padding=__a , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class A ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : Optional[int] , __a : Optional[Any] ) -> Tuple:
__UpperCAmelCase = start_length
__UpperCAmelCase = eof_strings
__UpperCAmelCase = tokenizer
def __call__( self : Optional[Any] , __a : List[Any] , __a : Union[str, Any] , **__a : List[Any] ) -> Tuple:
__UpperCAmelCase = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
__UpperCAmelCase = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__a )
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = re.split('''(%s)''' % '''|'''.join(UpperCamelCase__ ) , UpperCamelCase__ )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int=2_0 , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = defaultdict(UpperCamelCase__ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(UpperCamelCase__ ) ):
with torch.no_grad():
__UpperCAmelCase = batch['''ids'''].shape[-1]
__UpperCAmelCase = accelerator.unwrap_model(UpperCamelCase__ ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=UpperCamelCase__ , **UpperCamelCase__ )
# each task is generated batch_size times
__UpperCAmelCase = batch['''task_id'''].repeat(UpperCamelCase__ )
__UpperCAmelCase = accelerator.pad_across_processes(
UpperCamelCase__ , dim=1 , pad_index=tokenizer.pad_token_id )
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((generated_tokens, generated_tasks) )
__UpperCAmelCase = generated_tokens.cpu().numpy()
__UpperCAmelCase = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(UpperCamelCase__ , UpperCamelCase__ ):
gen_token_dict[task].append(UpperCamelCase__ )
__UpperCAmelCase = [[] for _ in range(UpperCamelCase__ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
__UpperCAmelCase = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
code_gens[task].append(remove_last_block(UpperCamelCase__ ) )
return code_gens
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = HfArgumentParser(UpperCamelCase__ )
__UpperCAmelCase = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__UpperCAmelCase = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__UpperCAmelCase = '''false'''
if args.num_workers is None:
__UpperCAmelCase = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__UpperCAmelCase = Accelerator()
set_seed(args.seed , device_specific=UpperCamelCase__ )
# Load model and tokenizer
__UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
__UpperCAmelCase = tokenizer.eos_token
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__UpperCAmelCase = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCamelCase__ , UpperCamelCase__ )] ),
}
# Load evaluation dataset and metric
__UpperCAmelCase = load_dataset('''openai_humaneval''' )
__UpperCAmelCase = load_metric('''code_eval''' )
__UpperCAmelCase = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
__UpperCAmelCase = args.n_samples // args.batch_size
__UpperCAmelCase = TokenizedDataset(UpperCamelCase__ , human_eval['''test'''] , n_copies=UpperCamelCase__ , n_tasks=UpperCamelCase__ )
    # note: args.batch_size is actually num_return_sequences, so the DataLoader batch size is 1
__UpperCAmelCase = DataLoader(UpperCamelCase__ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__UpperCAmelCase = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = complete_code(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , n_tasks=UpperCamelCase__ , batch_size=args.batch_size , **UpperCamelCase__ , )
if accelerator.is_main_process:
__UpperCAmelCase = []
for task in tqdm(range(UpperCamelCase__ ) ):
__UpperCAmelCase = human_eval['''test'''][task]['''test''']
__UpperCAmelCase = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
__UpperCAmelCase , __UpperCAmelCase = code_eval_metric.compute(
references=UpperCamelCase__ , predictions=UpperCamelCase__ , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : str = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
| 654 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ): # noqa: E741
"""simple docstring"""
while r - l > 1:
__UpperCAmelCase = (l + r) // 2
if v[m] >= key:
__UpperCAmelCase = m
else:
__UpperCAmelCase = m # noqa: E741
return r
def lowerCAmelCase ( UpperCamelCase__ : list[int] ):
"""simple docstring"""
if len(UpperCamelCase__ ) == 0:
return 0
__UpperCAmelCase = [0] * len(UpperCamelCase__ )
__UpperCAmelCase = 1
__UpperCAmelCase = v[0]
for i in range(1 , len(UpperCamelCase__ ) ):
if v[i] < tail[0]:
__UpperCAmelCase = v[i]
elif v[i] > tail[length - 1]:
__UpperCAmelCase = v[i]
length += 1
else:
__UpperCAmelCase = v[i]
return length
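# Readable sketch of the O(n log n) method above, assuming the
# `__UpperCAmelCase` placeholders stand for writes into the `tail` array.
# tail[k] holds the smallest possible tail of an increasing subsequence of
# length k + 1; each element either extends the longest run or tightens a tail.
def lis_length_sketch(v: list[int]) -> int:
    def ceil_index(tail: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
        # same binary search as the first helper above
        while r - l > 1:
            m = (l + r) // 2
            if tail[m] >= key:
                r = m
            else:
                l = m  # noqa: E741
        return r
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]  # new smallest value starts a fresh length-1 run
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]  # extends the current longest subsequence
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
assert lis_length_sketch([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6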
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458
# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = symbols("ct x y z")
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
"""simple docstring"""
    # Default to the symbolic four-vector when no event is supplied
if event is None:
__UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowerCAmelCase : Dict = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
__lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 0 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCAmelCase : List[str] = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
__UpperCAmelCase = test_results.split(''' ''' )
__UpperCAmelCase = 0
__UpperCAmelCase = 0
    # When the output is short enough, it is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__UpperCAmelCase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(UpperCamelCase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
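# Self-contained illustration of the summary parser above (the sample pytest
# line is an assumption): "failed"/"passed" tokens pull the count preceding
# them, and a trailing "==" marker shifts the time token one position left.
_tokens = "1 failed, 2 passed in 30.5s ==".split(" ")
_time = _tokens[-2] if "=" in _tokens[-1] else _tokens[-1]
_n_failed = sum(int(_tokens[i - 1]) for i, tok in enumerate(_tokens) if "failed" in tok)
_n_passed = sum(int(_tokens[i - 1]) for i, tok in enumerate(_tokens) if "passed" in tok)
assert (_n_failed, _n_passed, _time) == (1, 2, "30.5s")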
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = {}
__UpperCAmelCase = None
__UpperCAmelCase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' , UpperCamelCase__ ):
__UpperCAmelCase = True
__UpperCAmelCase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__UpperCAmelCase = line
__UpperCAmelCase = False
return failures
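# Self-contained illustration of the failure extractor above (the sample
# report lines are assumptions): a "[doctest]" header names the failing test
# and the next non-numbered line is recorded as its first failure line.
_failures, _current_test, _in_error = {}, None, False
_sample = "____ [doctest] transformers.models.bert.modeling_bert.BertModel.forward\nValueError: mismatch"
for _line in _sample.split("\n"):
    if re.search(r"_ \[doctest\]", _line):
        _in_error, _current_test = True, _line.split(" ")[2]
    elif _in_error and not _line.split(" ")[0].isdigit():
        _failures[_current_test] = _line
        _in_error = False
assert _failures == {"transformers.models.bert.modeling_bert.BertModel.forward": "ValueError: mismatch"}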
class A :
def __init__( self : Union[str, Any] , __a : str , __a : Dict ) -> str:
__UpperCAmelCase = title
__UpperCAmelCase = doc_test_results['''time_spent'''].split(''',''' )[0]
__UpperCAmelCase = doc_test_results['''success''']
__UpperCAmelCase = doc_test_results['''failures''']
__UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__UpperCAmelCase = doc_test_results
@property
def snake_case__ ( self : Optional[int] ) -> str:
__UpperCAmelCase = [self._time_spent]
__UpperCAmelCase = 0
for time in time_spent:
__UpperCAmelCase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
__UpperCAmelCase = [0, 0, time_parts[0]]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
return f"""{int(__a )}h{int(__a )}m{int(__a )}s"""
@property
def snake_case__ ( self : Optional[int] ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def snake_case__ ( self : List[str] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def snake_case__ ( self : int ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = 4_0
__UpperCAmelCase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(__a , __a )}
__UpperCAmelCase = ''''''
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def snake_case__ ( self : str ) -> str:
__UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def snake_case__ ( ) -> Dict:
__UpperCAmelCase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=__a , )
def snake_case__ ( self : int ) -> Union[str, Any]:
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.'''
__UpperCAmelCase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=__a , )
def snake_case__ ( self : Tuple , __a : Optional[Any] , __a : Union[str, Any] , __a : str , __a : List[Any] ) -> Tuple:
__UpperCAmelCase = ''''''
for key, value in failures.items():
__UpperCAmelCase = value[:2_0_0] + ''' [Truncated]''' if len(__a ) > 2_5_0 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
__UpperCAmelCase = job_name
__UpperCAmelCase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__UpperCAmelCase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def snake_case__ ( self : str ) -> List[str]:
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__UpperCAmelCase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
__UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda __a : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__UpperCAmelCase = f"""*Num failures* :{len(job_result['failed'] )} \n"""
__UpperCAmelCase = job_result['''failures''']
__UpperCAmelCase = self.get_reply_blocks(__a , __a , __a , text=__a )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f"""Results for {job}""" , blocks=__a , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = os.environ['''GITHUB_RUN_ID''']
__UpperCAmelCase = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
__UpperCAmelCase = requests.get(UpperCamelCase__ ).json()
__UpperCAmelCase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__UpperCAmelCase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = requests.get(url + f"""&page={i + 2}""" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , UpperCamelCase__ )
return {}
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = {}
if os.path.exists(UpperCamelCase__ ):
__UpperCAmelCase = os.listdir(UpperCamelCase__ )
for file in files:
try:
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , encoding='''utf-8''' ) as f:
__UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"""Could not open {os.path.join(UpperCamelCase__ , UpperCamelCase__ )}.""" ) from e
return _artifact
def lowerCAmelCase ( ):
"""simple docstring"""
class A :
def __init__( self : Union[str, Any] , __a : str ) -> int:
__UpperCAmelCase = name
__UpperCAmelCase = []
def __str__( self : str ) -> List[str]:
return self.name
def snake_case__ ( self : Optional[Any] , __a : str ) -> List[Any]:
self.paths.append({'''name''': self.name, '''path''': path} )
__UpperCAmelCase = {}
__UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
__UpperCAmelCase = Artifact(UpperCamelCase__ )
_available_artifacts[artifact_name].add_path(UpperCamelCase__ )
return _available_artifacts
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = get_job_links()
__lowerCAmelCase : Any = retrieve_available_artifacts()
__lowerCAmelCase : List[Any] = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCAmelCase : Dict = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCAmelCase : str = github_actions_job_links.get("run_doctests")
__lowerCAmelCase : Dict = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
__lowerCAmelCase : Tuple = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
__lowerCAmelCase : int = handle_test_results(artifact["stats"])
__lowerCAmelCase : Dict = failed
__lowerCAmelCase : str = success
__lowerCAmelCase : Optional[Any] = time_spent[1:-1] + ", "
__lowerCAmelCase : Dict = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
__lowerCAmelCase : Any = line.replace("FAILED ", "")
__lowerCAmelCase : Optional[Any] = line.split()[0].replace("\n", "")
if "::" in line:
__lowerCAmelCase : Optional[int] = line.split("::")
else:
__lowerCAmelCase : Any = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCAmelCase : Optional[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCAmelCase : Optional[int] = all_failures[test] if test in all_failures else "N/A"
__lowerCAmelCase : Union[str, Any] = failure
break
__lowerCAmelCase : int = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 720 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
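# Quick comparison of the two distance notions above on an illustrative pair
# of grid points (the values are assumptions): the Euclidean distance between
# (0, 0) and (3, 4) is 5.0 while the Manhattan distance is 3 + 4 = 7.
_p, _q = np.array((0, 0)), np.array((3, 4))
print(np.linalg.norm(_p - _q))                  # 5.0 (consistent heuristic)
print(abs(_p[0] - _q[0]) + abs(_p[1] - _q[1]))  # 7   (Manhattan heuristic)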
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((__UpperCAmelCase) , (__UpperCAmelCase)) = s
__UpperCAmelCase = (x - 1, y)
__UpperCAmelCase = (x + 1, y)
__UpperCAmelCase = (x, y + 1)
__UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 0 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str]=1E-1_2 ):
"""simple docstring"""
__UpperCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(UpperCamelCase__ , axis=1 ) , a_min=UpperCamelCase__ ) ).T
__UpperCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(UpperCamelCase__ , axis=1 ) , a_min=UpperCamelCase__ ) ).T
return jnp.matmul(UpperCamelCase__ , norm_emb_a.T )
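# Self-contained check of the normalized similarity above (the sample
# embeddings are assumptions): rows are L2-normalized, so identical unit
# vectors score 1 and orthogonal ones score 0.
_e1, _e2 = jnp.array([[1.0, 0.0]]), jnp.array([[0.0, 1.0]])
_n1 = jnp.divide(_e1.T, jnp.clip(jnp.linalg.norm(_e1, axis=1), a_min=1e-12)).T
_n2 = jnp.divide(_e2.T, jnp.clip(jnp.linalg.norm(_e2, axis=1), a_min=1e-12)).T
print(jnp.matmul(_n1, _n1.T))  # [[1.]]
print(jnp.matmul(_n1, _n2.T))  # [[0.]]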
class A ( nn.Module ):
a_ = 4_2
a_ = jnp.floataa
def snake_case__ ( self : int ) -> str:
__UpperCAmelCase = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__a , dtype=self.dtype )
__UpperCAmelCase = self.param('''concept_embeds''' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
__UpperCAmelCase = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (1_7,) )
__UpperCAmelCase = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self : Tuple , __a : Any ) -> str:
__UpperCAmelCase = self.vision_model(__a )[1]
__UpperCAmelCase = self.visual_projection(__a )
__UpperCAmelCase = jax_cosine_distance(__a , self.special_care_embeds )
__UpperCAmelCase = jax_cosine_distance(__a , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase = 0.0
__UpperCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase = jnp.round(__a , 3 )
__UpperCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__a )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase = is_special_care * 0.0_1
__UpperCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase = jnp.round(__a , 3 )
__UpperCAmelCase = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class A ( UpperCAmelCase ):
a_ = CLIPConfig
a_ = '''clip_input'''
a_ = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Optional[int] , __a : CLIPConfig , __a : Optional[Tuple] = None , __a : int = 0 , __a : jnp.dtype = jnp.floataa , __a : bool = True , **__a : Optional[Any] , ) -> Optional[Any]:
if input_shape is None:
__UpperCAmelCase = (1, 2_2_4, 2_2_4, 3)
__UpperCAmelCase = self.module_class(config=__a , dtype=__a , **__a )
super().__init__(__a , __a , input_shape=__a , seed=__a , dtype=__a , _do_init=_do_init )
def snake_case__ ( self : List[Any] , __a : jax.random.KeyArray , __a : Tuple , __a : FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase = jax.random.normal(__a , __a )
__UpperCAmelCase , __UpperCAmelCase = jax.random.split(__a )
__UpperCAmelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
__UpperCAmelCase = self.module.init(__a , __a )['''params''']
return random_params
def __call__( self : int , __a : List[str] , __a : dict = None , ) -> Dict:
__UpperCAmelCase = jnp.transpose(__a , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(__a , dtype=jnp.floataa ) , rngs={} , )
| 721 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
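# Self-contained example of the splitter above: the pattern cuts at
# lower-to-upper boundaries and before the last capital of an acronym run,
# so "TFBertModel" splits into ["TF", "Bert", "Model"].
_demo_matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "TFBertModel")
assert [m.group(0) for m in _demo_matches] == ["TF", "Bert", "Model"]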
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
    # Let's look through all transformers objects (once) and record whether each model is supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to pick a default processing class for each model (Processor, then Tokenizer, then FeatureExtractor).
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
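# Sketch of the resulting frame (column names as in the upstream script:
# pt/tf/flax are booleans, processor is the preferred preprocessing class):
#   model_type | pt   | tf   | flax | processor
#   "bert"     | True | True | True | "AutoTokenizer"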
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
# Sort the model classes to avoid nondeterministic ordering that would create spurious update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
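# Example invocations (token/sha values hypothetical):
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only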
| 654 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _A :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : int=13 , lowerCamelCase : Optional[int]=32 , lowerCamelCase : Tuple=2 , lowerCamelCase : Tuple=3 , lowerCamelCase : Dict=16 , lowerCamelCase : Union[str, Any]=[1, 2, 1] , lowerCamelCase : List[Any]=[2, 2, 4] , lowerCamelCase : Optional[Any]=2 , lowerCamelCase : Any=2.0 , lowerCamelCase : List[str]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : int=0.0 , lowerCamelCase : str=0.1 , lowerCamelCase : Union[str, Any]="gelu" , lowerCamelCase : List[Any]=False , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : List[str]=1e-5 , lowerCamelCase : Any=True , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : str=True , lowerCamelCase : str=10 , lowerCamelCase : List[Any]=8 , lowerCamelCase : str=["stage1", "stage2", "stage3"] , lowerCamelCase : Dict=[1, 2, 3] , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = MaskFormerSwinModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _snake_case ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = MaskFormerSwinBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowerCamelCase ):
__lowercase = ["stem"]
__lowercase = MaskFormerSwinBackbone(config=lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
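# Usage sketch: the test case below drives this tester, pairing a random
# pixel_values batch with a MaskFormerSwinConfig, e.g.
#   tester = MaskFormerSwinModelTester(self)
#   config, inputs_dict = tester.prepare_config_and_inputs_for_common()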
@require_torch
class _A ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_snake_case : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
_snake_case : List[str] = False
_snake_case : List[str] = False
_snake_case : Any = False
_snake_case : Tuple = False
_snake_case : Dict = False
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def _snake_case ( self : Tuple ):
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self : List[Any] ):
'''simple docstring'''
return
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def _snake_case ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _snake_case ( self : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def _snake_case ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _snake_case ( self : Dict ):
'''simple docstring'''
pass
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCamelCase : Optional[Any] ):
__lowercase = 0
return t
def check_equivalence(lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int={} ):
with torch.no_grad():
__lowercase = model(**lowerCamelCase , return_dict=lowerCamelCase , **lowerCamelCase )
__lowercase = model(**lowerCamelCase , return_dict=lowerCamelCase , **lowerCamelCase ).to_tuple()
def recursive_check(lowerCamelCase : Any , lowerCamelCase : int ):
if isinstance(lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCamelCase , lowerCamelCase ):
recursive_check(lowerCamelCase , lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowerCamelCase , lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowerCamelCase ) , set_nan_tensor_to_zero(lowerCamelCase ) , atol=1e-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(lowerCamelCase ).any()} and `inf`: {torch.isinf(lowerCamelCase )}. Dict has"""
f""" `nan`: {torch.isnan(lowerCamelCase ).any()} and `inf`: {torch.isinf(lowerCamelCase )}."""
) , )
recursive_check(lowerCamelCase , lowerCamelCase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , {"output_hidden_states": True} )
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
__lowercase = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , {"output_hidden_states": True} )
@require_torch
class _A ( unittest.TestCase , _lowercase ):
'''simple docstring'''
_snake_case : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_snake_case : Tuple = MaskFormerSwinConfig
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = MaskFormerSwinModelTester(self )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowerCamelCase )
backbone.to(lowerCamelCase )
backbone.eval()
__lowercase = backbone(**lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowerCamelCase , output_hidden_states=lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowerCamelCase , output_attentions=lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
| 655 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ : Any = logging.get_logger(__name__)
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
_snake_case : Dict = """maskformer-swin"""
_snake_case : List[str] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , lowerCamelCase : Any=224 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=96 , lowerCamelCase : str=[2, 2, 6, 2] , lowerCamelCase : Dict=[3, 6, 12, 24] , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Any=4.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : int="gelu" , lowerCamelCase : Optional[int]=False , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Tuple=1e-5 , lowerCamelCase : Dict=None , lowerCamelCase : Dict=None , **lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(lowerCamelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
__lowercase = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
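# Usage sketch (the class is exported upstream as MaskFormerSwinConfig):
# requesting intermediate feature maps aligns out_features with out_indices,
# e.g. out_features=["stage1", "stage3"] yields out_indices=[1, 3] given
# stage_names = ["stem", "stage1", "stage2", ...].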
| 655 | 1 |
from ....utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ):
'''simple docstring'''
__lowercase = config.__dict__
__lowercase = modal_hidden_size
if num_labels:
__lowercase = num_labels
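# Usage sketch (ConfigClassAbove and the keyword names are hypothetical): the
# wrapper copies an existing text config's attributes and attaches the
# modality projection size, e.g.
#   mmbt_cfg = ConfigClassAbove(bert_config, num_labels=2, modal_hidden_size=2_048)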
| 655 |
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("The given input must be non-negative" )
# get the generated string sequence
__lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__lowercase = int(sequence[i] , 2 )
return sequence
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# The approach is a recursive one
# Base case achieved when either n = 0 or n = 1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__lowercase = 1 << bit_count # defines the length of the sequence
# 1 << n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__lowercase = gray_code_sequence_string(bit_count - 1 )
__lowercase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__lowercase = "0" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__lowercase = "1" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
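# Illustration (using the upstream names; the helpers above implement the same
# logic): a reflected Gray code changes exactly one bit between neighbours,
# e.g. gray_code_sequence(2) -> [0, 1, 3, 2] and
# gray_code_sequence(3) -> [0, 1, 3, 2, 6, 7, 5, 4].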
| 655 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : str = KandinskyVaaPipeline
_snake_case : List[Any] = [
"""image_embeds""",
"""negative_image_embeds""",
]
_snake_case : Optional[int] = ["""image_embeds""", """negative_image_embeds"""]
_snake_case : List[str] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_snake_case : str = False
@property
def _snake_case ( self : Dict ):
'''simple docstring'''
return 32
@property
def _snake_case ( self : int ):
'''simple docstring'''
return 32
@property
def _snake_case ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _snake_case ( self : int ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = {
"in_channels": 4,
# out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__lowercase = UNetaDConditionModel(**lowerCamelCase )
return model
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.dummy_unet
__lowercase = self.dummy_movq
__lowercase = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCamelCase , )
__lowercase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _snake_case ( self : Tuple , lowerCamelCase : Dict , lowerCamelCase : Tuple=0 ):
'''simple docstring'''
__lowercase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
__lowercase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase )
if str(lowerCamelCase ).startswith("mps" ):
__lowercase = torch.manual_seed(lowerCamelCase )
else:
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__lowercase = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = "cpu"
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
__lowercase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = pipe(**self.get_dummy_inputs(lowerCamelCase ) )
__lowercase = output.images
__lowercase = pipe(
**self.get_dummy_inputs(lowerCamelCase ) , return_dict=lowerCamelCase , )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
__lowercase = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase )
__lowercase = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
__lowercase = pipeline.to(lowerCamelCase )
pipeline.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = "red cat, 4k photo"
__lowercase = torch.Generator(device="cuda" ).manual_seed(0 )
__lowercase , __lowercase = pipe_prior(
lowerCamelCase , generator=lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__lowercase = torch.Generator(device="cuda" ).manual_seed(0 )
__lowercase = pipeline(
image_embeds=lowerCamelCase , negative_image_embeds=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=100 , output_type="np" , )
__lowercase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
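# Two-stage usage the slow test above mirrors (model ids as used there; sketch):
#   image_emb, negative_emb = pipe_prior(prompt).to_tuple()
#   image = pipeline(image_embeds=image_emb, negative_image_embeds=negative_emb).images[0]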
| 655 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
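# The pattern these helpers verify, in isolation (sketch of the Accelerate API
# used below): inside the context, gradients accumulate locally with no
# all-reduce, and are synchronized again on the first backward outside it:
#   with accelerator.no_sync(model):
#       loss.backward()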
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
model.train()
__lowercase = model(_SCREAMING_SNAKE_CASE )
__lowercase = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
set_seed(4_2 )
__lowercase = RegressionModel()
__lowercase = deepcopy(_SCREAMING_SNAKE_CASE )
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__lowercase = AdamW(params=model.parameters() , lr=1E-3 )
__lowercase = AdamW(params=ddp_model.parameters() , lr=1E-3 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
__lowercase = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.6_5 )
# Make a copy of `model`
if sched:
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test when on a single CPU or GPU that the context manager does nothing
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test on distributed setup that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
__lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = RegressionDataset(length=8_0 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase = RegressionDataset(length=9_6 )
__lowercase = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=1_6 )
__lowercase , __lowercase = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if iteration < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def snake_case_ ( ):
__lowercase = Accelerator()
__lowercase = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
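# Typically executed through the Accelerate launcher (config path hypothetical):
#   accelerate launch --config_file default_config.yml this_script.py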
| 655 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : Dict = {
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = """roc_bert"""
def __init__( self : int , lowerCamelCase : int=30_522 , lowerCamelCase : str=768 , lowerCamelCase : List[str]=12 , lowerCamelCase : str=12 , lowerCamelCase : Tuple=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Dict=512 , lowerCamelCase : Tuple=2 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : Dict=1e-12 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Union[str, Any]=0 , lowerCamelCase : Dict="absolute" , lowerCamelCase : str=None , lowerCamelCase : Any=True , lowerCamelCase : List[Any]=True , lowerCamelCase : Optional[Any]=768 , lowerCamelCase : int=910 , lowerCamelCase : Union[str, Any]=512 , lowerCamelCase : Dict=24_858 , lowerCamelCase : Any=True , **lowerCamelCase : Dict , ):
'''simple docstring'''
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = type_vocab_size
__lowercase = layer_norm_eps
__lowercase = use_cache
__lowercase = enable_pronunciation
__lowercase = enable_shape
__lowercase = pronunciation_embed_dim
__lowercase = pronunciation_vocab_size
__lowercase = shape_embed_dim
__lowercase = shape_vocab_size
__lowercase = concat_input
__lowercase = position_embedding_type
__lowercase = classifier_dropout
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
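# Usage sketch (the class is exported upstream as RoCBertConfig); the
# pronunciation and shape channels are optional, e.g.
#   cfg = RoCBertConfig(enable_pronunciation=False, enable_shape=False)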
| 655 |
from ....utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : Dict=2_048 ):
'''simple docstring'''
__lowercase = config.__dict__
__lowercase = modal_hidden_size
if num_labels:
__lowercase = num_labels
| 655 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = TFAutoModel.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = AutoModel.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = AutoModelForPreTraining.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def _snake_case ( self : int ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
__lowercase , __lowercase = TFAutoModelForCausalLM.from_pretrained(
lowerCamelCase , output_loading_info=lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = AutoModelForCausalLM.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
__lowercase , __lowercase = AutoModelForCausalLM.from_pretrained(
lowerCamelCase , output_loading_info=lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = AutoModelWithLMHead.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
__lowercase , __lowercase = TFAutoModelForMaskedLM.from_pretrained(
lowerCamelCase , output_loading_info=lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = AutoModelForMaskedLM.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
__lowercase , __lowercase = AutoModelForMaskedLM.from_pretrained(
lowerCamelCase , output_loading_info=lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
__lowercase , __lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCamelCase , output_loading_info=lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
__lowercase , __lowercase = AutoModelForSeqaSeqLM.from_pretrained(
lowerCamelCase , output_loading_info=lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def _snake_case ( self : List[Any] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
__lowercase = AutoModelForQuestionAnswering.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase ) , 14_410 )
__lowercase = AutoModelWithLMHead.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase ) , 14_410 )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase ) , 14_410 )
__lowercase = AutoModelWithLMHead.from_pretrained(lowerCamelCase , from_tf=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase ) , 14_410 )
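# The cross-framework round trip exercised above, in one place (sketch):
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)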
| 655 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Dict = StableUnCLIPImgaImgPipeline
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : int = frozenset([] )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = 32
__lowercase = embedder_hidden_size
# image encoding components
__lowercase = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__lowercase = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__lowercase = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__lowercase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowercase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__lowercase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__lowercase = AutoencoderKL()
__lowercase = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def _snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 , lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("mps" ):
__lowercase = torch.manual_seed(lowerCamelCase )
else:
__lowercase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__lowercase = input_image * 0.5 + 0.5
__lowercase = input_image.clamp(0 , 1 )
__lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowercase = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__lowercase = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__lowercase = sd_pipe(**lowerCamelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self : str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__lowercase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
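# A hedged, standalone sketch of the peak-memory bookkeeping used in the test
# above: reset the CUDA peak counters, run the workload, then read the high
# watermark with torch.cuda.max_memory_allocated().
if __name__ == "__main__":
    import torch

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()
        _ = torch.randn(1_024, 1_024, device="cuda") @ torch.randn(1_024, 1_024, device="cuda")
        print(f"peak bytes: {torch.cuda.max_memory_allocated()}")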
| 655 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _A :
'''simple docstring'''
_snake_case : CommonSchedulerState
# setable values
_snake_case : jnp.ndarray
_snake_case : jnp.ndarray
_snake_case : Optional[int] = None
@classmethod
def _snake_case ( cls : Tuple , lowerCamelCase : CommonSchedulerState , lowerCamelCase : jnp.ndarray , lowerCamelCase : jnp.ndarray ):
'''simple docstring'''
return cls(common=lowerCamelCase , init_noise_sigma=lowerCamelCase , timesteps=lowerCamelCase )
@dataclass
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : DDPMSchedulerState
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
_snake_case : List[str] = [e.name for e in FlaxKarrasDiffusionSchedulers]
_snake_case : jnp.dtype
@property
def _snake_case ( self : str ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Tuple , lowerCamelCase : int = 1_000 , lowerCamelCase : float = 0.0001 , lowerCamelCase : float = 0.02 , lowerCamelCase : str = "linear" , lowerCamelCase : Optional[jnp.ndarray] = None , lowerCamelCase : str = "fixed_small" , lowerCamelCase : bool = True , lowerCamelCase : str = "epsilon" , lowerCamelCase : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
__lowercase = dtype
def _snake_case ( self : Union[str, Any] , lowerCamelCase : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
__lowercase = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__lowercase = jnp.array(1.0 , dtype=self.dtype )
__lowercase = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCamelCase , init_noise_sigma=lowerCamelCase , timesteps=lowerCamelCase , )
def _snake_case ( self : List[Any] , lowerCamelCase : DDPMSchedulerState , lowerCamelCase : jnp.ndarray , lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def _snake_case ( self : Union[str, Any] , lowerCamelCase : DDPMSchedulerState , lowerCamelCase : int , lowerCamelCase : Tuple = () ):
'''simple docstring'''
__lowercase = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__lowercase = (jnp.arange(0 , lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCamelCase , timesteps=lowerCamelCase , )
def _snake_case ( self : Tuple , lowerCamelCase : DDPMSchedulerState , lowerCamelCase : int , lowerCamelCase : int=None , lowerCamelCase : Tuple=None ):
'''simple docstring'''
__lowercase = state.common.alphas_cumprod[t]
__lowercase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowercase = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__lowercase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__lowercase = jnp.clip(lowerCamelCase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__lowercase = jnp.log(jnp.clip(lowerCamelCase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
__lowercase = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__lowercase = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__lowercase = variance
__lowercase = state.common.betas[t]
__lowercase = (predicted_variance + 1) / 2
__lowercase = frac * max_log + (1 - frac) * min_log
return variance
def _snake_case ( self : Dict , lowerCamelCase : DDPMSchedulerState , lowerCamelCase : jnp.ndarray , lowerCamelCase : int , lowerCamelCase : jnp.ndarray , lowerCamelCase : Optional[jax.random.KeyArray] = None , lowerCamelCase : bool = True , ):
'''simple docstring'''
__lowercase = timestep
if key is None:
__lowercase = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__lowercase , __lowercase = jnp.split(lowerCamelCase , sample.shape[1] , axis=1 )
else:
__lowercase = None
# 1. compute alphas, betas
__lowercase = state.common.alphas_cumprod[t]
__lowercase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__lowercase = 1 - alpha_prod_t
__lowercase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowercase = model_output
elif self.config.prediction_type == "v_prediction":
__lowercase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowercase = jnp.clip(lowerCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__lowercase = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__lowercase = jax.random.split(lowerCamelCase , num=1 )
__lowercase = jax.random.normal(lowerCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowerCamelCase , lowerCamelCase , predicted_variance=lowerCamelCase ) ** 0.5) * noise
__lowercase = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__lowercase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCamelCase , state=lowerCamelCase )
def _snake_case ( self : Optional[int] , lowerCamelCase : DDPMSchedulerState , lowerCamelCase : jnp.ndarray , lowerCamelCase : jnp.ndarray , lowerCamelCase : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Tuple , lowerCamelCase : DDPMSchedulerState , lowerCamelCase : jnp.ndarray , lowerCamelCase : jnp.ndarray , lowerCamelCase : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __len__( self : Tuple ):
'''simple docstring'''
return self.config.num_train_timesteps
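# A hedged NumPy sketch of the DDPM posterior terms computed in `step` above
# (formulas (6) and (7) of https://arxiv.org/pdf/2006.11239.pdf), assuming a
# plain linear beta schedule; the scheduler computes the same quantities in JAX.
if __name__ == "__main__":
    import numpy as np

    num_train_timesteps = 1_000
    betas = np.linspace(1e-4, 0.02, num_train_timesteps)
    alphas = 1.0 - betas
    alphas_cumprod = np.cumprod(alphas)

    t = 500
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0

    # posterior variance and the two coefficients of the posterior mean
    variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * betas[t]
    coeff_x0 = (alpha_prod_t_prev ** 0.5 * betas[t]) / (1 - alpha_prod_t)
    coeff_xt = alphas[t] ** 0.5 * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)
    print(variance, coeff_x0, coeff_xt)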
| 655 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _A ( _lowercase , _lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[Any] , *,
lowerCamelCase : int = 4 , lowerCamelCase : int = 768 , lowerCamelCase : int , lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__()
__lowercase = nn.Parameter(torch.zeros(lowerCamelCase ) )
# parameters for additional clip time embeddings
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
# parameters for encoder hidden states
__lowercase = clip_extra_context_tokens
__lowercase = nn.Linear(
lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim )
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase )
__lowercase = nn.LayerNorm(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , *, lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__lowercase = image_embeddings.shape[0]
__lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__lowercase = classifier_free_guidance_embeddings.expand(
lowerCamelCase , -1 )
__lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__lowercase = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__lowercase = self.embedding_proj(lowerCamelCase )
__lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase )
__lowercase = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__lowercase = self.clip_extra_context_tokens_proj(lowerCamelCase )
__lowercase = clip_extra_context_tokens.reshape(lowerCamelCase , -1 , self.clip_extra_context_tokens )
__lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 )
__lowercase = self.encoder_hidden_states_proj(lowerCamelCase )
__lowercase = self.text_encoder_hidden_states_norm(lowerCamelCase )
__lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
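# A hedged sketch (shapes assumed for illustration) of the "extra context
# tokens" trick above: a single linear layer maps a pooled image embedding to
# N token embeddings, which are then concatenated to the text encoder states.
if __name__ == "__main__":
    batch, embed_dim, n_tokens, cross_dim = 2, 768, 4, 1_280
    proj = nn.Linear(embed_dim, n_tokens * cross_dim)
    image_embeds = torch.randn(batch, embed_dim)
    extra_tokens = proj(image_embeds).reshape(batch, n_tokens, cross_dim)
    text_states = torch.randn(batch, 77, cross_dim)
    context = torch.cat([extra_tokens, text_states], dim=1)  # (2, 4 + 77, 1280)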
| 655 | 1 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
snake_case__ : Optional[Any] = logging.getLogger(__name__)
snake_case__ : str = """Hello world! cécé herlolip"""
snake_case__ : str = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = BertAbsConfig(
temp_dir="." , finetune_bert=_SCREAMING_SNAKE_CASE , large=_SCREAMING_SNAKE_CASE , share_emb=_SCREAMING_SNAKE_CASE , use_bert_emb=_SCREAMING_SNAKE_CASE , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
__lowercase = torch.load(_SCREAMING_SNAKE_CASE , lambda storage , loc : storage )
__lowercase = AbsSummarizer(_SCREAMING_SNAKE_CASE , torch.device("cpu" ) , _SCREAMING_SNAKE_CASE )
original.eval()
__lowercase = BertAbsSummarizer(_SCREAMING_SNAKE_CASE , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__lowercase = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__lowercase = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_SCREAMING_SNAKE_CASE )) )
__lowercase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
__lowercase = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_SCREAMING_SNAKE_CASE )) )
__lowercase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__lowercase = encoder_input_ids
__lowercase = decoder_input_ids
__lowercase = __lowercase = None
__lowercase = None
__lowercase = __lowercase = None
__lowercase = __lowercase = None
__lowercase = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__lowercase = original(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
__lowercase = original.generator(_SCREAMING_SNAKE_CASE )
__lowercase = new_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
__lowercase = new_model.generator(_SCREAMING_SNAKE_CASE )
__lowercase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_SCREAMING_SNAKE_CASE ) )
__lowercase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_SCREAMING_SNAKE_CASE ) )
__lowercase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
snake_case__ : str = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
snake_case__ : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
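# A hedged, generic version of the equivalence check performed above: compare
# two modules on the same input via the maximum absolute difference and
# torch.allclose. The modules are assumed to return a single tensor.
def outputs_match(model_a, model_b, inputs, atol=1E-3):
    with torch.no_grad():
        out_a, out_b = model_a(inputs), model_b(inputs)
    print("max abs diff: {:.2e}".format(torch.max(torch.abs(out_a - out_b)).item()))
    return torch.allclose(out_a, out_b, atol=atol)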
| 655 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case__ : Union[str, Any] = TypeVar("""T""")
snake_case__ : Optional[int] = TypeVar("""U""")
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ):
'''simple docstring'''
__lowercase = key
__lowercase = val
__lowercase = None
__lowercase = None
def __repr__( self : Any ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _A ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase , __lowercase = self.rear, self.head
def __repr__( self : Optional[Any] ):
'''simple docstring'''
__lowercase = ["DoubleLinkedList"]
__lowercase = self.head
while node.next is not None:
rep.append(str(lowerCamelCase ) )
__lowercase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
__lowercase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__lowercase = node
__lowercase = previous
__lowercase = node
__lowercase = self.rear
def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__lowercase = node.next
__lowercase = node.prev
__lowercase = None
__lowercase = None
return node
class _A ( Generic[T, U] ):
'''simple docstring'''
_snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = DoubleLinkedList()
__lowercase = capacity
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = {}
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Dict , lowerCamelCase : T ):
'''simple docstring'''
return key in self.cache
def _snake_case ( self : List[Any] , lowerCamelCase : T ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
__lowercase = self.cache[key]
__lowercase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase )
return node.val
self.miss += 1
return None
def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__lowercase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__lowercase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__lowercase = value
self.list.add(lowerCamelCase )
@classmethod
def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ):
'''simple docstring'''
def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__lowercase = LRUCache(lowerCamelCase )
__lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__lowercase = func(*lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
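    # A hedged aside: the decorator implemented above mirrors the standard
    # library's functools.lru_cache, shown here for comparison.
    from functools import lru_cache

    @lru_cache(maxsize=128)
    def fib(n: int) -> int:
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(30)
    print(fib.cache_info())  # CacheInfo(hits=28, misses=31, maxsize=128, currsize=31)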
| 655 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case__ : int = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Union[str, Any] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
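# A hedged, minimal sketch of the optional-dependency probe behind the
# try/except blocks above: check for a backend without importing it, and only
# expose the heavy symbols when it is installed.
import importlib.util

def _is_backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

# _is_backend_available("torch") -> True only when PyTorch is installed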
| 655 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case__ : Optional[Any] = logging.getLogger()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = "\n".join(_SCREAMING_SNAKE_CASE )
Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random"""
snake_case__ : int = """sshleifer/bart-tiny-random"""
snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart"""
snake_case__ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : str , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(lowerCamelCase , lowerCamelCase )
__lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
run_generate()
assert Path(lowerCamelCase ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self : Dict ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
__lowercase = Path(self.get_auto_remove_tmp_dir() )
__lowercase = str(tmp_dir / "scores.json" )
__lowercase = str(tmp_dir / "val.target" )
_dump_articles(lowerCamelCase , text["en"] )
_dump_articles(lowerCamelCase , text["de"] )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{str(lowerCamelCase )}
{str(lowerCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
with CaptureStdout() as cs:
run_search()
__lowercase = [" num_beams | length_penalty", model, "Best score args"]
__lowercase = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(lowerCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase ).exists()
os.remove(Path(lowerCamelCase ) )
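# A hedged, minimal version of the sys.argv patching pattern the tests above
# rely on: substitute fake CLI arguments, then invoke the script's entry point.
if __name__ == "__main__":
    def _demo_main() -> None:
        print("args:", sys.argv[1:])

    with patch.object(sys, "argv", ["prog", "--num_beams", "2"]):
        _demo_main()  # sees the patched argv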
| 655 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
class _A :
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : Dict ):
'''simple docstring'''
__lowercase = metric_id
class _A :
'''simple docstring'''
_snake_case : str = [MetricMock(_lowercase ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def _snake_case ( self : List[Any] ):
'''simple docstring'''
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if "tmp_path" in args:
__lowercase = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(_SCREAMING_SNAKE_CASE , match="https://huggingface.co/docs/evaluate" ):
func(*_SCREAMING_SNAKE_CASE )
| 655 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
'''simple docstring'''
_snake_case : int
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__lowercase , __lowercase = get_distrib(node.left )
__lowercase , __lowercase = get_distrib(node.right )
__lowercase = 1 - left_distrib_excess
__lowercase = 1 - right_distrib_excess
__lowercase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
__lowercase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
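# A hedged, standalone sketch of the post-order "excess" idea implemented
# above: each subtree reports how many coins it holds beyond one per node, and
# every unit of excess crossing an edge costs exactly one move.
@dataclass
class _DemoNode:
    coins: int
    left: _DemoNode | None = None
    right: _DemoNode | None = None

def _demo_min_moves(node: _DemoNode | None) -> tuple[int, int]:
    """Return (moves, excess) for the subtree rooted at ``node``."""
    if node is None:
        return 0, 0
    l_moves, l_excess = _demo_min_moves(node.left)
    r_moves, r_excess = _demo_min_moves(node.right)
    moves = l_moves + r_moves + abs(l_excess) + abs(r_excess)
    return moves, node.coins - 1 + l_excess + r_excess

# _demo_min_moves(_DemoNode(3, _DemoNode(0), _DemoNode(0)))[0] == 2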
| 655 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : str , lowerCamelCase : TransformeraDModel , lowerCamelCase : AutoencoderKL , lowerCamelCase : KarrasDiffusionSchedulers , lowerCamelCase : Optional[Dict[int, str]] = None , ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=lowerCamelCase , vae=lowerCamelCase , scheduler=lowerCamelCase )
# create an imagenet label -> id dictionary for easier use
__lowercase = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
__lowercase = int(lowerCamelCase )
__lowercase = dict(sorted(self.labels.items() ) )
def _snake_case ( self : int , lowerCamelCase : Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = list(lowerCamelCase )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Any , lowerCamelCase : List[int] , lowerCamelCase : float = 4.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : int = 50 , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ):
'''simple docstring'''
__lowercase = len(lowerCamelCase )
__lowercase = self.transformer.config.sample_size
__lowercase = self.transformer.config.in_channels
__lowercase = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCamelCase , device=self.device , dtype=self.transformer.dtype , )
__lowercase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__lowercase = torch.tensor(lowerCamelCase , device=self.device ).reshape(-1 )
__lowercase = torch.tensor([1_000] * batch_size , device=self.device )
__lowercase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__lowercase = latent_model_input[: len(lowerCamelCase ) // 2]
__lowercase = torch.cat([half, half] , dim=0 )
__lowercase = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
__lowercase = t
if not torch.is_tensor(lowerCamelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__lowercase = latent_model_input.device.type == "mps"
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = torch.floataa if is_mps else torch.floataa
else:
__lowercase = torch.intaa if is_mps else torch.intaa
__lowercase = torch.tensor([timesteps] , dtype=lowerCamelCase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__lowercase = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowercase = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__lowercase = self.transformer(
lowerCamelCase , timestep=lowerCamelCase , class_labels=lowerCamelCase ).sample
# perform guidance
if guidance_scale > 1:
__lowercase , __lowercase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__lowercase , __lowercase = torch.split(lowerCamelCase , len(lowerCamelCase ) // 2 , dim=0 )
__lowercase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__lowercase = torch.cat([half_eps, half_eps] , dim=0 )
__lowercase = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__lowercase , __lowercase = torch.split(lowerCamelCase , lowerCamelCase , dim=1 )
else:
__lowercase = noise_pred
# compute previous image: x_t -> x_t-1
__lowercase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
if guidance_scale > 1:
__lowercase , __lowercase = latent_model_input.chunk(2 , dim=0 )
else:
__lowercase = latent_model_input
__lowercase = 1 / self.vae.config.scaling_factor * latents
__lowercase = self.vae.decode(lowerCamelCase ).sample
__lowercase = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowercase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowerCamelCase )
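# A hedged, minimal sketch of the classifier-free guidance update applied in
# the loop above: extrapolate from the unconditional noise prediction towards
# the conditional one by the guidance scale.
if __name__ == "__main__":
    guidance_scale = 4.0
    cond_eps = torch.randn(2, 4, 32, 32)    # stand-in for the conditional output
    uncond_eps = torch.randn(2, 4, 32, 32)  # stand-in for the unconditional output
    guided_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)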
| 655 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = SwinvaConfig()
__lowercase = swinva_name.split("_" )
__lowercase = name_split[1]
if "to" in name_split[3]:
__lowercase = int(name_split[3][-3:] )
else:
__lowercase = int(name_split[3] )
if "to" in name_split[2]:
__lowercase = int(name_split[2][-2:] )
else:
__lowercase = int(name_split[2][6:] )
if model_size == "tiny":
__lowercase = 9_6
__lowercase = (2, 2, 6, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowercase = 9_6
__lowercase = (2, 2, 1_8, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowercase = 1_2_8
__lowercase = (2, 2, 1_8, 2)
__lowercase = (4, 8, 1_6, 3_2)
else:
__lowercase = 1_9_2
__lowercase = (2, 2, 1_8, 2)
__lowercase = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__lowercase = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowercase = 2_1_8_4_1
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-22k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
else:
__lowercase = 1_0_0_0
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-1k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = img_size
__lowercase = num_classes
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
return config
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if "patch_embed.proj" in name:
__lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowercase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowercase = "encoder." + name
if "attn.proj" in name:
__lowercase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowercase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowercase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowercase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowercase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowercase = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
__lowercase = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
__lowercase = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
__lowercase = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
__lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
__lowercase = "layernorm.weight"
if name == "norm.bias":
__lowercase = "layernorm.bias"
if "head" in name:
__lowercase = name.replace("head" , "classifier" )
else:
__lowercase = "swinv2." + name
return name
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase = key.split("." )
__lowercase = int(key_split[1] )
__lowercase = int(key_split[3] )
__lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[
dim : dim * 2
]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
__lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE )
__lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
__lowercase = timm_model(inputs["pixel_values"] )
__lowercase = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case__ : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
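# A hedged illustration of the fused-qkv split performed in convert_state_dict
# above: a single (3 * dim, dim) projection matrix is cut into equal thirds
# for the query, key and value weights.
def _demo_split_qkv(qkv_weight, dim):
    return qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]

# q, k, v = _demo_split_qkv(torch.randn(3 * 96, 96), 96)  # each (96, 96)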
| 655 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
__lowercase = None
if token is not None:
__lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
__lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
__lowercase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
__lowercase = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
__lowercase = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(_SCREAMING_SNAKE_CASE ):
__lowercase = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
__lowercase = None
if token is not None:
__lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
__lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
__lowercase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
__lowercase = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
__lowercase = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(_SCREAMING_SNAKE_CASE ):
__lowercase = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = None
if token is not None:
__lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
__lowercase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
__lowercase = result.headers["Location"]
__lowercase = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
__lowercase = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
with open(_SCREAMING_SNAKE_CASE , "wb" ) as fp:
fp.write(response.content )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
__lowercase = []
__lowercase = []
__lowercase = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_SCREAMING_SNAKE_CASE ) as f:
for line in f:
__lowercase = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__lowercase = line[: line.index(": " )]
__lowercase = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
__lowercase = line[len("FAILED " ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
__lowercase = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` """
F"""and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
" problem." )
__lowercase = None
if job_name and job_links:
__lowercase = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
__lowercase = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
__lowercase = []
__lowercase = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
__lowercase = Counter()
counter.update([x[1] for x in logs] )
__lowercase = counter.most_common()
__lowercase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__lowercase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
__lowercase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = test.split("::" )[0]
if test.startswith("tests/models/" ):
__lowercase = test.split("/" )[2]
else:
__lowercase = None
return test
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
__lowercase = [(x[0], x[1], get_model(x[2] )) for x in logs]
__lowercase = [x for x in logs if x[2] is not None]
__lowercase = {x[2] for x in logs}
__lowercase = {}
for test in tests:
__lowercase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__lowercase = counter.most_common()
__lowercase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__lowercase = sum(error_counts.values() )
if n_errors > 0:
__lowercase = {"count": n_errors, "errors": error_counts}
__lowercase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = "| no. | error | status |"
__lowercase = "|-:|:-|:-|"
__lowercase = [header, sep]
for error in reduced_by_error:
__lowercase = reduced_by_error[error]["count"]
__lowercase = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = "| model | no. of errors | major error | count |"
__lowercase = "|-:|-:|-:|-:|"
__lowercase = [header, sep]
for model in reduced_by_model:
__lowercase = reduced_by_model[model]["count"]
__lowercase , __lowercase = list(reduced_by_model[model]["errors"].items() )[0]
__lowercase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
snake_case__ : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
snake_case__ : List[Any] = get_job_links(args.workflow_run_id, token=args.token)
snake_case__ : Optional[int] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
snake_case__ : Dict = k.find(""" / """)
snake_case__ : Optional[int] = k[index + len(""" / """) :]
snake_case__ : Optional[Any] = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
snake_case__ : str = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
snake_case__ : int = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
snake_case__ : Optional[int] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
snake_case__ : Tuple = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
snake_case__ : Union[str, Any] = reduce_by_error(errors)
snake_case__ : Optional[int] = reduce_by_model(errors)
snake_case__ : Optional[Any] = make_github_table(reduced_by_error)
snake_case__ : Union[str, Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
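# A hedged, minimal version of the aggregation above: count error messages
# with collections.Counter and read them back in descending frequency.
def _demo_reduce(logs):
    return Counter(error for _, error in logs).most_common()

# _demo_reduce([("l1", "OOM"), ("l2", "OOM"), ("l3", "Timeout")])
# -> [("OOM", 2), ("Timeout", 1)]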
| 655 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to unicode strings. Printable byte
    values map to themselves; the remaining bytes are shifted past 255 so that
    every byte gets a distinct, printable stand-in character.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
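

# Example of the mapping: printable bytes map to themselves, e.g.
#   bytes_to_unicode()[ord("a")] == "a"
# while non-printable bytes are shifted to a printable stand-in, e.g.
#   bytes_to_unicode()[0] == "Ā" and bytes_to_unicode()[ord(" ")] == "Ġ"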
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
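

# Example: get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}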
class LEDTokenizer(PreTrainedTokenizer):
    """
    Constructs a LED tokenizer, which uses byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply the learned BPE merges to a single (byte-encoded) token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (i.e. most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
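
    # Sketch of a single merge pass (hypothetical ranks, not a real merges file):
    # if self.bpe_ranks ranked ("l", "o") ahead of every other pair, bpe("low")
    # would first rewrite ("l", "o", "w") as ("lo", "w") and then keep merging
    # until no ranked pair remains.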
    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
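
    # For example, the regex first splits "Hello world" into ["Hello", " world"];
    # byte-encoding turns the leading space into "Ġ", so the BPE sees "Ġworld".
    # The final sub-tokens depend on the merges file that was loaded.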
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """
        Build model inputs from a sequence or a pair of sequences by adding special tokens.
        A LED sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
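
    # Concretely (the ids are illustrative): with cls_token_id == 0 and
    # sep_token_id == 2,
    #   build_inputs_with_special_tokens([7, 8]) == [0, 7, 8, 2]
    #   build_inputs_with_special_tokens([7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]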
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve a mask marking special token positions (1) vs. sequence token positions (0).
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
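
    # LED (like BART/RoBERTa) does not use token type ids, so the returned mask
    # is all zeros whether one or two sequences are passed in.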
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
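
    # Example of the behaviour (shapes only, not tied to a real checkpoint): if
    # input_ids was padded from length 3 to 5 on the right, a
    # global_attention_mask of [1, 0, 0] becomes [1, 0, 0, -1, -1].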
| 655 | 1 |
from __future__ import annotations


def minimum_cost_path(matrix: list[list[int]]) -> int:
    """
    Find the minimum cost traced by all possible paths from top left to bottom
    right in a given matrix, moving only right or down.

    >>> minimum_cost_path([[1]])
    1
    >>> minimum_cost_path([[2, 1, 4], [2, 1, 3], [3, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for the current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 655 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Fractional knapsack: greedily take items in order of profit/weight ratio."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store the profit gained per 1 kg of each item
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight reaches the max limit (e.g. 15 kg) or i reaches length
    while limit <= max_weight and i < length:
        # greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so .index() skips it next time

        # check if the item's weight fits in the remaining capacity.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding the profit gained for the whole item:
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # Since the item's weight exceeds the remaining capacity, take only
            # the remaining kgs and calculate the profit for that fraction.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break

        i += 1
    return gain
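

# Worked example (verified by hand): with profits [60, 100, 120], weights
# [10, 20, 30], and capacity 50, the two best-ratio items are taken whole
# (gain 160, weight 30) and 20/30 of the last item adds 80 more, so
#   calc_profit([60, 100, 120], [10, 20, 30], 50) == 240.0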
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 655 | 1 |