Dataset schema (one row = five fields):

    column                    type     range
    ------                    ----     -----
    code                      string   length 81 to 54k
    code_codestyle            int64    0 to 721
    style_context             string   length 91 to 41.9k
    style_context_codestyle   int64    0 to 699
    label                     int64    0 or 1

Each row below therefore reads in order: a `code` sample, its `code_codestyle` id, a `style_context` sample, its `style_context_codestyle` id, and the binary `label`.
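The rows are easier to inspect programmatically than by eye. A minimal sketch of loading a dataset with this schema through the `datasets` library; the repo id "user/code-style-pairs" is a placeholder, since the dump does not name the actual dataset:

from datasets import load_dataset

# Hypothetical repo id -- substitute the real dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:120])  # first 120 characters of the flattened source file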
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model lowerCAmelCase_: Optional[Any] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def __a ( A , A , A=None ): '''simple docstring''' if rng is None: lowercase__ = random.Random() lowercase__ = 1 for dim in shape: total_dims *= dim lowercase__ = [] for _ in range(A ): values.append(rng.randint(0 , vocab_size - 1 ) ) lowercase__ = np.array(A , dtype=jnp.intaa ).reshape(A ) return output def __a ( A , A=None ): '''simple docstring''' lowercase__ = ids_tensor(A , vocab_size=2 , rng=A ) # make sure that at least one token is attended to for each batch lowercase__ = 1 return attn_mask @require_flax class a__ : snake_case_ = None snake_case_ = () def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 lowercase__ = 2 lowercase__ = inputs["input_ids"].shape[-1] // 2 lowercase__ = inputs["input_ids"][:max_batch_size, :sequence_length] lowercase__ = jnp.ones_like(_UpperCAmelCase ) lowercase__ = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens lowercase__ = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` lowercase__ = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() lowercase__ = False lowercase__ = max_length lowercase__ = 0 for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ = getattr(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = pt_model_class(_UpperCAmelCase ).eval() lowercase__ = load_flax_weights_in_pytorch_model(_UpperCAmelCase, flax_model.params ) lowercase__ = flax_model.generate(_UpperCAmelCase ).sequences lowercase__ = pt_model.generate(torch.tensor(_UpperCAmelCase, dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: lowercase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() lowercase__ = False lowercase__ = max_length for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() 
lowercase__ = True lowercase__ = max_length for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() lowercase__ = False lowercase__ = max_length lowercase__ = 2 for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() lowercase__ = False lowercase__ = max_length lowercase__ = 2 lowercase__ = 2 for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() lowercase__ = True lowercase__ = max_length lowercase__ = 0.8 lowercase__ = 10 lowercase__ = 0.3 lowercase__ = 1 lowercase__ = 8 lowercase__ = 9 for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() lowercase__ = max_length lowercase__ = 1 lowercase__ = 8 lowercase__ = 9 for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() lowercase__ = max_length lowercase__ = 2 lowercase__ = 1 lowercase__ = 8 lowercase__ = 9 for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() # pad 
attention mask on the left lowercase__ = attention_mask.at[(0, 0)].set(0 ) lowercase__ = False lowercase__ = max_length for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() # pad attention mask on the left lowercase__ = attention_mask.at[(0, 0)].set(0 ) lowercase__ = True lowercase__ = max_length for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config() # pad attention mask on the left lowercase__ = attention_mask.at[(0, 0)].set(0 ) lowercase__ = 2 lowercase__ = max_length for model_class in self.all_generative_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model.generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase ) lowercase__ = jit(model.generate ) lowercase__ = jit_generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) @require_flax class a__ ( unittest.TestCase ): def snake_case__ ( self ): '''simple docstring''' lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) lowercase__ = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) lowercase__ = "Hello world" lowercase__ = tokenizer(_UpperCAmelCase, return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(_UpperCAmelCase, "do_samples" ): model.generate(_UpperCAmelCase, do_samples=_UpperCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(_UpperCAmelCase, "foo" ): lowercase__ = {"foo": "bar"} model.generate(_UpperCAmelCase, **_UpperCAmelCase )
code_codestyle: 668
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _a ): snake_case_ = (IPNDMScheduler,) snake_case_ = (("num_inference_steps", 50),) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = {"num_train_timesteps": 1000} config.update(**_UpperCAmelCase ) return config def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase 
).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = 10 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample return sample def snake_case__ ( self ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ): lowercase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.timesteps[5] lowercase__ = scheduler.timesteps[6] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def snake_case__ ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 254_0529 ) < 10
style_context_codestyle: 668
label: 1
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset lowerCAmelCase_: Optional[int] = random.Random() def __a ( A , A=1.0 , A=None , A=None ): '''simple docstring''' if rng is None: lowercase__ = global_rng lowercase__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class a__ ( unittest.TestCase ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase=7, _UpperCAmelCase=400, _UpperCAmelCase=2000, _UpperCAmelCase=2048, _UpperCAmelCase=128, _UpperCAmelCase=1, _UpperCAmelCase=512, _UpperCAmelCase=30, _UpperCAmelCase=4_4100, ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = min_seq_length lowercase__ = max_seq_length lowercase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase__ = spectrogram_length lowercase__ = feature_size lowercase__ = num_audio_channels lowercase__ = hop_length lowercase__ = chunk_length lowercase__ = sampling_rate def snake_case__ ( self ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def snake_case__ ( self, _UpperCAmelCase=False, _UpperCAmelCase=False ): '''simple docstring''' def _flatten(_UpperCAmelCase ): return list(itertools.chain(*_UpperCAmelCase ) ) if equal_length: lowercase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowercase__ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: lowercase__ = [np.asarray(_UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class a__ ( _a , unittest.TestCase ): snake_case_ = TvltFeatureExtractor def snake_case__ ( self ): '''simple docstring''' lowercase__ = TvltFeatureExtractionTester(self ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_UpperCAmelCase, "spectrogram_length" ) ) self.assertTrue(hasattr(_UpperCAmelCase, "feature_size" ) ) self.assertTrue(hasattr(_UpperCAmelCase, "num_audio_channels" ) ) self.assertTrue(hasattr(_UpperCAmelCase, "hop_length" ) ) self.assertTrue(hasattr(_UpperCAmelCase, "chunk_length" ) ) self.assertTrue(hasattr(_UpperCAmelCase, "sampling_rate" ) ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = feat_extract_first.save_pretrained(_UpperCAmelCase )[0] check_json_file_has_correct_format(_UpperCAmelCase ) lowercase__ = self.feature_extraction_class.from_pretrained(_UpperCAmelCase ) lowercase__ = feat_extract_first.to_dict() lowercase__ = feat_extract_second.to_dict() lowercase__ = 
dict_first.pop("mel_filters" ) lowercase__ = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(_UpperCAmelCase, _UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = os.path.join(_UpperCAmelCase, "feat_extract.json" ) feat_extract_first.to_json_file(_UpperCAmelCase ) lowercase__ = self.feature_extraction_class.from_json_file(_UpperCAmelCase ) lowercase__ = feat_extract_first.to_dict() lowercase__ = feat_extract_second.to_dict() lowercase__ = dict_first.pop("mel_filters" ) lowercase__ = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(_UpperCAmelCase, _UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] lowercase__ = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs] # Test not batched input lowercase__ = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched lowercase__ = feature_extractor(_UpperCAmelCase, return_tensors="np", sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking lowercase__ = feature_extractor( _UpperCAmelCase, return_tensors="np", sampling_rate=4_4100, mask_audio=_UpperCAmelCase ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase__ = np.asarray(_UpperCAmelCase ) lowercase__ = feature_extractor(_UpperCAmelCase, return_tensors="np", sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" ) # automatic decoding with librispeech lowercase__ = ds.sort("id" ).select(range(_UpperCAmelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def snake_case__ ( self ): '''simple docstring''' lowercase__ = self._load_datasamples(1 ) lowercase__ = TvltFeatureExtractor() lowercase__ = feature_extractor(_UpperCAmelCase, return_tensors="pt" ).audio_values self.assertEquals(audio_values.shape, (1, 1, 192, 128) ) lowercase__ = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], _UpperCAmelCase, atol=1E-4 ) )
code_codestyle: 668
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( _a , unittest.TestCase ): snake_case_ = MgpstrTokenizer snake_case_ = False snake_case_ = {} snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() # fmt: off lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = "tester" lowercase__ = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ), 1 ) lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase ) lowercase__ = tokenizer.tokenize(_UpperCAmelCase ) lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ), 0 ) lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def snake_case__ ( self ): '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def snake_case__ ( self ): '''simple docstring''' pass
style_context_codestyle: 668
label: 1
"""simple docstring""" def __a ( A , A ): '''simple docstring''' return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def __a ( A , A=0 ): '''simple docstring''' return sorted(A , key=lambda A : x[column] ) def __a ( A , A , A=float("inf" ) ): '''simple docstring''' for i in range(points_counts - 1 ): for j in range(i + 1 , A ): lowercase__ = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: lowercase__ = current_dis return min_dis def __a ( A , A , A=float("inf" ) ): '''simple docstring''' for i in range(min(6 , points_counts - 1 ) , A ): for j in range(max(0 , i - 6 ) , A ): lowercase__ = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: lowercase__ = current_dis return min_dis def __a ( A , A , A ): '''simple docstring''' if points_counts <= 3: return dis_between_closest_pair(A , A ) # recursion lowercase__ = points_counts // 2 lowercase__ = closest_pair_of_points_sqr( A , points_sorted_on_y[:mid] , A ) lowercase__ = closest_pair_of_points_sqr( A , points_sorted_on_y[mid:] , points_counts - mid ) lowercase__ = min(A , A ) lowercase__ = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(A ) lowercase__ = dis_between_closest_in_strip( A , len(A ) , A ) return min(A , A ) def __a ( A , A ): '''simple docstring''' lowercase__ = column_based_sort(A , column=0 ) lowercase__ = column_based_sort(A , column=1 ) return ( closest_pair_of_points_sqr( A , A , A ) ) ** 0.5 if __name__ == "__main__": lowerCAmelCase_: Tuple = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)] print("Distance:", closest_pair_of_points(points, len(points)))
code_codestyle: 668
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
style_context_codestyle: 668
label: 1
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class a__ ( nn.Module ): def __init__( self, _UpperCAmelCase = 16, _UpperCAmelCase = 88, _UpperCAmelCase = None, _UpperCAmelCase = 1, _UpperCAmelCase = 0.0, _UpperCAmelCase = 32, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = "geglu", _UpperCAmelCase = None, ): '''simple docstring''' super().__init__() lowercase__ = nn.ModuleList( [ TransformeraDModel( num_attention_heads=_UpperCAmelCase, attention_head_dim=_UpperCAmelCase, in_channels=_UpperCAmelCase, num_layers=_UpperCAmelCase, dropout=_UpperCAmelCase, norm_num_groups=_UpperCAmelCase, cross_attention_dim=_UpperCAmelCase, attention_bias=_UpperCAmelCase, sample_size=_UpperCAmelCase, num_vector_embeds=_UpperCAmelCase, activation_fn=_UpperCAmelCase, num_embeds_ada_norm=_UpperCAmelCase, ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowercase__ = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowercase__ = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowercase__ = [1, 0] def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase = True, ): '''simple docstring''' lowercase__ = hidden_states lowercase__ = [] lowercase__ = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowercase__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowercase__ = self.transformer_index_for_condition[i] lowercase__ = self.transformers[transformer_index]( _UpperCAmelCase, encoder_hidden_states=_UpperCAmelCase, timestep=_UpperCAmelCase, cross_attention_kwargs=_UpperCAmelCase, return_dict=_UpperCAmelCase, )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowercase__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowercase__ = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=_UpperCAmelCase )
code_codestyle: 668
"""simple docstring""" from typing import Any import numpy as np def __a ( A ): '''simple docstring''' return np.array_equal(A , matrix.conjugate().T ) def __a ( A , A ): '''simple docstring''' lowercase__ = v.conjugate().T lowercase__ = v_star.dot(A ) assert isinstance(A , np.ndarray ) return (v_star_dot.dot(A )) / (v_star.dot(A )) def __a ( ): '''simple docstring''' lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) lowercase__ = np.array([[1], [2], [3]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' print(rayleigh_quotient(A , A ) ) lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' assert rayleigh_quotient(A , A ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
style_context_codestyle: 668
label: 1
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase=3, _UpperCAmelCase=32, _UpperCAmelCase=3, _UpperCAmelCase=10, _UpperCAmelCase=[10, 20, 30, 40], _UpperCAmelCase=[1, 1, 2, 1], _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase="relu", _UpperCAmelCase=3, _UpperCAmelCase=None, ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = num_channels lowercase__ = embeddings_size lowercase__ = hidden_sizes lowercase__ = depths lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_act lowercase__ = num_labels lowercase__ = scope lowercase__ = len(_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = self.get_config() return config, pixel_values def snake_case__ ( self ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = FlaxRegNetModel(config=_UpperCAmelCase ) lowercase__ = model(_UpperCAmelCase ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = FlaxRegNetForImageClassification(config=_UpperCAmelCase ) lowercase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( _a , unittest.TestCase ): snake_case_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () snake_case_ = False snake_case_ = False snake_case_ = False def snake_case__ ( self ): '''simple docstring''' lowercase__ = FlaxRegNetModelTester(self ) lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, has_text_modality=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case__ ( self ): 
'''simple docstring''' return def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @unittest.skip(reason="RegNet does not use inputs_embeds" ) def snake_case__ ( self ): '''simple docstring''' pass @unittest.skip(reason="RegNet does not support input and output embeddings" ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["pixel_values"] self.assertListEqual(arg_names[:1], _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' def check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) ) lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ), expected_num_stages + 1 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = True check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = model_class(_UpperCAmelCase ) @jax.jit def model_jitted(_UpperCAmelCase, **_UpperCAmelCase ): return model(pixel_values=_UpperCAmelCase, **_UpperCAmelCase ) with self.subTest("JIT Enabled" ): lowercase__ = model_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ = model_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase, _UpperCAmelCase ): self.assertEqual(jitted_output.shape, output.shape ) def __a ( ): '''simple docstring''' lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def snake_case__ ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None @slow def snake_case__ ( self ): '''simple docstring''' lowercase__ = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=_UpperCAmelCase, return_tensors="np" ) lowercase__ = model(**_UpperCAmelCase ) # 
verify the logits lowercase__ = (1, 1000) self.assertEqual(outputs.logits.shape, _UpperCAmelCase ) lowercase__ = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3], _UpperCAmelCase, atol=1E-4 ) )
code_codestyle: 668
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class a__ ( _a , unittest.TestCase ): snake_case_ = PriorTransformer snake_case_ = "hidden_states" @property def snake_case__ ( self ): '''simple docstring''' lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) def snake_case__ ( self ): '''simple docstring''' lowercase__ = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } lowercase__ = self.dummy_input return init_dict, inputs_dict def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ), 0 ) model.to(_UpperCAmelCase ) lowercase__ = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common() lowercase__ = self.model_class(**_UpperCAmelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ) lowercase__ = model.to(_UpperCAmelCase ) if hasattr(_UpperCAmelCase, "set_default_attn_processor" ): model.set_default_attn_processor() lowercase__ = self.get_dummy_seed_input() with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] lowercase__ = output[0, :5].flatten().cpu() print(_UpperCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) ) @slow class a__ ( unittest.TestCase ): def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = batch_size lowercase__ = embedding_dim lowercase__ = num_embeddings lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" ) model.to(_UpperCAmelCase ) lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase ) with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] assert list(sample.shape ) == [1, 768] lowercase__ = sample[0, :8].flatten().cpu() print(_UpperCAmelCase ) lowercase__ = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
style_context_codestyle: 668
label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase_: Tuple = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Any = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Union[str, Any] = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase_: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 668
"""simple docstring""" lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def __a ( A ): '''simple docstring''' if not isinstance(A , A ): lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ = len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ = b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ = b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def __a ( A ): '''simple docstring''' if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ = ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ = encoded_data[:-padding] lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 668
label: 1
"""simple docstring""" lowerCAmelCase_: List[str] = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
code_codestyle: 668
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ): '''simple docstring''' lowercase__ = symbols(A ) lowercase__ = lambdify(A , A ) lowercase__ = lambdify(A , diff(A , A ) ) lowercase__ = starting_point while True: if diff_function(A ) != 0: lowercase__ = prev_guess - multiplicity * func(A ) / diff_function( A ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase__ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial # Find fourth Root of 5 print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}') # Find value of e print( "The root of log(y) - 1 = 0 is ", F'{newton_raphson("log(y) - 1", 2, variable="y")}', ) # Exponential Roots print( "The root of exp(x) - 1 = 0 is", F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}', ) # Find root of cos(x) print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
style_context_codestyle: 668
label: 1
"""simple docstring""" import numpy as np def __a ( A , A , A , A , A ): '''simple docstring''' lowercase__ = int(np.ceil((x_end - xa) / h ) ) lowercase__ = np.zeros((n + 1,) ) lowercase__ = ya lowercase__ = xa for k in range(A ): lowercase__ = f(A , y[k] ) lowercase__ = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) lowercase__ = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) lowercase__ = f(x + h , y[k] + h * ka ) lowercase__ = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 668
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_: Union[str, Any] = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Any = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Tuple = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Optional[Any] = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase_: Optional[int] = 
_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
668
1
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _a ): snake_case_ = (IPNDMScheduler,) snake_case_ = (("num_inference_steps", 50),) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = {"num_train_timesteps": 1000} config.update(**_UpperCAmelCase ) return config def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase 
).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = 10 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample return sample def snake_case__ ( self ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ): lowercase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.timesteps[5] lowercase__ = scheduler.timesteps[6] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def snake_case__ ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 254_0529 ) < 10
668
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = ["audio_values", "audio_mask"] def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = spectrogram_length lowercase__ = num_channels lowercase__ = patch_size lowercase__ = feature_size // self.patch_size[1] lowercase__ = n_fft lowercase__ = sampling_rate // hop_length_to_sampling_rate lowercase__ = sampling_rate lowercase__ = padding_value lowercase__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = spectrogram( _UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, ) lowercase__ = log_spec[:, :-1] lowercase__ = log_spec - 20.0 lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ = is_batched_numpy or ( isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ): lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa ) elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowercase__ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], _UpperCAmelCase ): lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowercase__ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowercase__ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa ) # convert into correct format for padding lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowercase__ = padded_audio_features * self.padding_value for i in range(len(_UpperCAmelCase ) ): lowercase__ = audio_features[i] lowercase__ = feature # return as BatchFeature if return_attention_mask: lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: lowercase__ = {"audio_values": padded_audio_features} lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase ) return encoded_inputs
668
1
"""simple docstring""" from collections import deque class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = process_name # process name lowercase__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time lowercase__ = arrival_time lowercase__ = burst_time # remaining burst time lowercase__ = 0 # total time of the process wait in ready queue lowercase__ = 0 # time from arrival time to completion time class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ): '''simple docstring''' lowercase__ = number_of_queues # time slice of queues that round robin algorithm applied lowercase__ = time_slices # unfinished process is in this ready_queue lowercase__ = queue # current time lowercase__ = current_time # finished process is in this sequence queue lowercase__ = deque() def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return [q.burst_time for q in queue] def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of finished process while len(_UpperCAmelCase ) != 0: lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 lowercase__ = 0 # set the process's turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # set the completion time lowercase__ = self.current_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_UpperCAmelCase ) ): lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > 
time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time lowercase__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished lowercase__ = 0 # set the finish time lowercase__ = self.current_time # update the process' turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def snake_case__ ( self ): '''simple docstring''' for i in range(self.number_of_queues - 1 ): lowercase__ , lowercase__ = self.round_robin( self.ready_queue, self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3) lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7) lowerCAmelCase_: str = Process("P3", 0, 6_8) lowerCAmelCase_: int = Process("P4", 0, 2_4) lowerCAmelCase_: Dict = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])}) lowerCAmelCase_: Any = Process("P1", 0, 5_3) lowerCAmelCase_: Tuple = Process("P2", 0, 1_7) lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8) lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4) lowerCAmelCase_: Union[str, Any] = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa]) lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0) lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( F'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( F'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
668
"""simple docstring""" from __future__ import annotations import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def __a ( A ): '''simple docstring''' if not isinstance(A , A ): raise ValueError("n must be an integer" ) if n <= 0: raise ValueError("n must be >= 0" ) lowercase__ = [] for num in range(len(A ) ): lowercase__ = 0 while 2 * i * i <= odd_composites[num]: lowercase__ = odd_composites[num] - 2 * i * i if is_prime(A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(A ) == n: return list_nums return [] def __a ( ): '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
668
1
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase_: Dict = "pt" elif is_tf_available(): lowerCAmelCase_: Dict = "tf" else: lowerCAmelCase_: str = "jax" class a__ ( _a , unittest.TestCase ): snake_case_ = ByTaTokenizer snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() lowercase__ = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case__ ( self ): '''simple docstring''' return ByTaTokenizer.from_pretrained("google/byt5-small" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): try: lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) ) lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) ) if max_length is not None and len(_UpperCAmelCase ) > max_length: lowercase__ = toks[:max_length] if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0: while len(_UpperCAmelCase ) < min_length: lowercase__ = toks + toks # toks_str = [t[1] for t in toks] lowercase__ = [t[0] for t in toks] # Ensure consistency lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase ) if " " not in output_txt and len(_UpperCAmelCase ) > 1: lowercase__ = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase ) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase ) ) if with_prefix_space: lowercase__ = " " + output_txt lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) return output_txt, output_ids def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] ) lowercase__ = tokenizer(["hi", "I went to the gym", ""] ) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = "Unicode €." 
lowercase__ = tokenizer(_UpperCAmelCase ) lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" ) lowercase__ = tokenizer("e è é ê ë" ) lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) if FRAMEWORK != "jax": lowercase__ = list(batch.input_ids.numpy()[0] ) else: lowercase__ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", _UpperCAmelCase ) self.assertIn("attention_mask", _UpperCAmelCase ) self.assertNotIn("decoder_input_ids", _UpperCAmelCase ) self.assertNotIn("decoder_attention_mask", _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = [ "Summary of the text.", "Another summary.", ] lowercase__ = tokenizer( text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertEqual(32, targets["input_ids"].shape[1] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization. </s>"] lowercase__ = ["Summary of the text. 
</s>"] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] ) self.assertEqual(_UpperCAmelCase, batch["labels"][0] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) shutil.rmtree(_UpperCAmelCase ) lowercase__ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) lowercase__ = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )] lowercase__ = added_tokens_extra_ids + [ "an_additional_special_token" ] lowercase__ = added_tokens_extra_ids + [ 
"an_additional_special_token" ] with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )] lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase ) self.assertTrue(tokenizer.decode([255] ) == "" ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] lowercase__ = 0 lowercase__ = tokenizer.convert_ids_to_tokens( _UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) for attr in attributes_list: setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) 
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
668
"""simple docstring""" import os import sys lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) lowerCAmelCase_: Union[str, Any] = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoConfig.from_pretrained(*A , **A ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A , **A ) @add_start_docstrings(AutoModel.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModel.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
668
1
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCAmelCase_: int = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def __a ( A ): '''simple docstring''' config.addinivalue_line( "markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" ) config.addinivalue_line( "markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" ) config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" ) config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" ) config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" ) config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" ) def __a ( A ): '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(A ) def __a ( A ): '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main lowercase__ = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(A , id=A ) def __a ( A , A ): '''simple docstring''' if exitstatus == 5: lowercase__ = 0 # Doctest custom flag to ignore output. lowerCAmelCase_: int = doctest.register_optionflag("IGNORE_RESULT") lowerCAmelCase_: Optional[int] = doctest.OutputChecker class a__ ( _a ): def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) lowerCAmelCase_: int = CustomOutputChecker lowerCAmelCase_: Dict = HfDoctestModule lowerCAmelCase_: Any = HfDocTestParser
668
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class a__ ( unittest.TestCase ): @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ): lowercase__ = FlaxAutoModel.from_pretrained("bert-base" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
668
1
"""simple docstring""" import os import sys lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) lowerCAmelCase_: Union[str, Any] = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoConfig.from_pretrained(*A , **A ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A , **A ) @add_start_docstrings(AutoModel.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModel.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
668
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_: str = logging.get_logger(__name__) lowerCAmelCase_: List[Any] = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class a__ ( _a ): snake_case_ = "data2vec-vision" def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ): '''simple docstring''' super().__init__(**_UpperCAmelCase ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = use_mask_token lowercase__ = use_absolute_position_embeddings lowercase__ = use_relative_position_bias lowercase__ = use_shared_relative_position_bias lowercase__ = layer_scale_init_value lowercase__ = drop_path_rate lowercase__ = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__ = out_indices lowercase__ = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__ = use_auxiliary_head lowercase__ = auxiliary_loss_weight lowercase__ = auxiliary_channels lowercase__ = auxiliary_num_convs lowercase__ = auxiliary_concat_input lowercase__ = semantic_loss_ignore_index class a__ ( _a ): snake_case_ = version.parse("1.11" ) @property def snake_case__ ( self ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def snake_case__ ( self ): '''simple docstring''' return 1E-4
668
1
"""simple docstring""" from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_: Tuple = logging.get_logger(__name__) # TODO Update this lowerCAmelCase_: Any = { "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json", # See all ESM models at https://huggingface.co/models?filter=esm } class a__ ( _a ): snake_case_ = "esm" def __init__( self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=1026, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=None, _UpperCAmelCase=None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__(pad_token_id=_UpperCAmelCase, mask_token_id=_UpperCAmelCase, **_UpperCAmelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = emb_layer_norm_before lowercase__ = token_dropout lowercase__ = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("No esmfold_config supplied for folding model, using default values." ) lowercase__ = EsmFoldConfig() elif isinstance(_UpperCAmelCase, _UpperCAmelCase ): lowercase__ = EsmFoldConfig(**_UpperCAmelCase ) lowercase__ = esmfold_config if vocab_list is None: logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" ) lowercase__ = get_default_vocab_list() else: lowercase__ = vocab_list else: lowercase__ = None lowercase__ = None if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", _UpperCAmelCase ): raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" 
) def snake_case__ ( self ): '''simple docstring''' lowercase__ = super().to_dict() if isinstance(self.esmfold_config, _UpperCAmelCase ): lowercase__ = self.esmfold_config.to_dict() return output @dataclass class a__ : snake_case_ = None snake_case_ = True snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = 0 snake_case_ = True snake_case_ = False snake_case_ = 128 snake_case_ = None def snake_case__ ( self ): '''simple docstring''' if self.trunk is None: lowercase__ = TrunkConfig() elif isinstance(self.trunk, _UpperCAmelCase ): lowercase__ = TrunkConfig(**self.trunk ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = asdict(self ) lowercase__ = self.trunk.to_dict() return output @dataclass class a__ : snake_case_ = 48 snake_case_ = 1024 snake_case_ = 128 snake_case_ = 32 snake_case_ = 32 snake_case_ = 32 snake_case_ = 0 snake_case_ = 0 snake_case_ = False snake_case_ = 4 snake_case_ = 128 snake_case_ = None def snake_case__ ( self ): '''simple docstring''' if self.structure_module is None: lowercase__ = StructureModuleConfig() elif isinstance(self.structure_module, _UpperCAmelCase ): lowercase__ = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got" F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got" F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase__ = self.sequence_state_dim // self.sequence_head_width lowercase__ = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got" F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got" F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = asdict(self ) lowercase__ = self.structure_module.to_dict() return output @dataclass class a__ : snake_case_ = 384 snake_case_ = 128 snake_case_ = 16 snake_case_ = 128 snake_case_ = 12 snake_case_ = 4 snake_case_ = 8 snake_case_ = 0.1 snake_case_ = 8 snake_case_ = 1 snake_case_ = 2 snake_case_ = 7 snake_case_ = 10 snake_case_ = 1e-8 snake_case_ = 1e5 def snake_case__ ( self ): '''simple docstring''' return asdict(self ) def __a ( ): '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
668
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_: List[Any] = logging.get_logger(__name__) lowerCAmelCase_: int = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class a__ ( _a ): snake_case_ = "markuplm" def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout # additional properties lowercase__ = max_depth lowercase__ = max_xpath_tag_unit_embeddings lowercase__ = max_xpath_subs_unit_embeddings lowercase__ = tag_pad_id lowercase__ = subs_pad_id lowercase__ = xpath_unit_hidden_size
668
1
"""simple docstring""" def __a ( A ): '''simple docstring''' if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(A , A ): raise TypeError("Input value must be a 'int' type" ) return bin(A ).count("1" ) if __name__ == "__main__": import doctest doctest.testmod()
668
"""simple docstring""" lowerCAmelCase_: Union[str, Any] = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] lowerCAmelCase_: List[str] = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] lowerCAmelCase_: List[str] = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] lowerCAmelCase_: Dict = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] lowerCAmelCase_: Optional[int] = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] lowerCAmelCase_: Tuple = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] lowerCAmelCase_: str = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] lowerCAmelCase_: int = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
668
1
"""simple docstring""" import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging lowerCAmelCase_: Optional[int] = logging.get_logger(__name__) lowerCAmelCase_: Union[str, Any] = R"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n" class a__ ( _a ): @add_start_docstrings(_UpperCAmelCase ) def __call__( self, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = max_length lowercase__ = max_position_embeddings @add_start_docstrings(_UpperCAmelCase ) def __call__( self, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = input_ids.shape[-1] lowercase__ = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe ''' "exceptions, performance degradation, or nothing at all." ) return is_done class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. 
" F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ''' "with `max_length = start_length + max_new_tokens` instead.", _UpperCAmelCase, ) lowercase__ = start_length lowercase__ = max_new_tokens lowercase__ = start_length + max_new_tokens @add_start_docstrings(_UpperCAmelCase ) def __call__( self, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return input_ids.shape[-1] >= self.max_length class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = max_time lowercase__ = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(_UpperCAmelCase ) def __call__( self, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return time.time() - self.initial_timestamp > self.max_time class a__ ( _a ): @add_start_docstrings(_UpperCAmelCase ) def __call__( self, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return any(criteria(_UpperCAmelCase, _UpperCAmelCase ) for criteria in self ) @property def snake_case__ ( self ): '''simple docstring''' for stopping_criterium in self: if isinstance(_UpperCAmelCase, _UpperCAmelCase ): return stopping_criterium.max_length elif isinstance(_UpperCAmelCase, _UpperCAmelCase ): return stopping_criterium.max_length return None def __a ( A , A ): '''simple docstring''' lowercase__ = stopping_criteria.max_length lowercase__ = deepcopy(A ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , A ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=A ) ) return new_stopping_criteria
"""simple docstring""" from __future__ import annotations def __a ( A , A ): '''simple docstring''' if partitions <= 0: raise ValueError("partitions must be a positive number!" ) if partitions > number_of_bytes: raise ValueError("partitions can not > number_of_bytes!" ) lowercase__ = number_of_bytes // partitions lowercase__ = [] for i in range(A ): lowercase__ = i * bytes_per_partition + 1 lowercase__ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def __a ( A ): '''simple docstring''' if not isinstance(A , A ): lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ = len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ = b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ = b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def __a ( A ): '''simple docstring''' if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ = ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ = encoded_data[:-padding] lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from collections import deque class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = process_name # process name lowercase__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time lowercase__ = arrival_time lowercase__ = burst_time # remaining burst time lowercase__ = 0 # total time of the process wait in ready queue lowercase__ = 0 # time from arrival time to completion time class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ): '''simple docstring''' lowercase__ = number_of_queues # time slice of queues that round robin algorithm applied lowercase__ = time_slices # unfinished process is in this ready_queue lowercase__ = queue # current time lowercase__ = current_time # finished process is in this sequence queue lowercase__ = deque() def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return [q.burst_time for q in queue] def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of finished process while len(_UpperCAmelCase ) != 0: lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 lowercase__ = 0 # set the process's turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # set the completion time lowercase__ = self.current_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_UpperCAmelCase ) ): lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > 
time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time lowercase__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished lowercase__ = 0 # set the finish time lowercase__ = self.current_time # update the process' turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def snake_case__ ( self ): '''simple docstring''' for i in range(self.number_of_queues - 1 ): lowercase__ , lowercase__ = self.round_robin( self.ready_queue, self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3) lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7) lowerCAmelCase_: str = Process("P3", 0, 6_8) lowerCAmelCase_: int = Process("P4", 0, 2_4) lowerCAmelCase_: Dict = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])}) lowerCAmelCase_: Any = Process("P1", 0, 5_3) lowerCAmelCase_: Tuple = Process("P2", 0, 1_7) lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8) lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4) lowerCAmelCase_: Union[str, Any] = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa]) lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0) lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( F'completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( F'sequence of finished processes: {mlfq.calculate_sequence_of_finish_queue()}' )
"""simple docstring""" import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_: Dict = logging.get_logger(__name__) lowerCAmelCase_: Tuple = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } lowerCAmelCase_: Dict = { "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"}, "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"}, } lowerCAmelCase_: List[str] = { "ctrl": 2_5_6, } lowerCAmelCase_: Tuple = { "Pregnancy": 1_6_8_6_2_9, "Christianity": 7_6_7_5, "Explain": 1_0_6_4_2_3, "Fitness": 6_3_4_4_0, "Saving": 6_3_1_6_3, "Ask": 2_7_1_7_1, "Ass": 9_5_9_8_5, "Joke": 1_6_3_5_0_9, "Questions": 4_5_6_2_2, "Thoughts": 4_9_6_0_5, "Retail": 5_2_3_4_2, "Feminism": 1_6_4_3_3_8, "Writing": 1_1_9_9_2, "Atheism": 1_9_2_2_6_3, "Netflix": 4_8_6_1_6, "Computing": 3_9_6_3_9, "Opinion": 4_3_2_1_3, "Alone": 4_4_9_6_7, "Funny": 5_8_9_1_7, "Gaming": 4_0_3_5_8, "Human": 4_0_8_8, "India": 1_3_3_1, "Joker": 7_7_1_3_8, "Diet": 3_6_2_0_6, "Legal": 1_1_8_5_9, "Norman": 4_9_3_9, "Tip": 7_2_6_8_9, "Weight": 5_2_3_4_3, "Movies": 4_6_2_7_3, "Running": 2_3_4_2_5, "Science": 2_0_9_0, "Horror": 3_7_7_9_3, "Confession": 6_0_5_7_2, "Finance": 1_2_2_5_0, "Politics": 1_6_3_6_0, "Scary": 1_9_1_9_8_5, "Support": 1_2_6_5_4, "Technologies": 3_2_5_1_6, "Teenage": 6_6_1_6_0, "Event": 3_2_7_6_9, "Learned": 6_7_4_6_0, "Notion": 1_8_2_7_7_0, "Wikipedia": 3_7_5_8_3, "Books": 6_6_6_5, "Extract": 7_6_0_5_0, "Confessions": 1_0_2_7_0_1, "Conspiracy": 7_5_9_3_2, "Links": 6_3_6_7_4, "Narcissus": 1_5_0_4_2_5, "Relationship": 5_4_7_6_6, "Relationships": 1_3_4_7_9_6, "Reviews": 4_1_6_7_1, "News": 4_2_5_6, "Translation": 2_6_8_2_0, "multilingual": 1_2_8_4_0_6, } def __a ( A ): '''simple docstring''' lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char lowercase__ = set(A ) return pairs class a__ ( _a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = CONTROL_CODES def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="<unk>", **_UpperCAmelCase ): '''simple docstring''' super().__init__(unk_token=_UpperCAmelCase, **_UpperCAmelCase ) with open(_UpperCAmelCase, encoding="utf-8" ) as vocab_handle: lowercase__ = json.load(_UpperCAmelCase ) lowercase__ = {v: k for k, v in self.encoder.items()} with open(_UpperCAmelCase, encoding="utf-8" ) as merges_handle: lowercase__ = merges_handle.read().split("\n" )[1:-1] lowercase__ = [tuple(merge.split() ) for merge in merges] lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) lowercase__ = {} @property def snake_case__ ( self ): '''simple docstring''' return len(self.encoder ) def snake_case__ ( self ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if token in self.cache: return self.cache[token] lowercase__ = tuple(_UpperCAmelCase ) lowercase__ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) lowercase__ = get_pairs(_UpperCAmelCase ) if not pairs: return token while True: lowercase__ = min(_UpperCAmelCase, key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase, float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowercase__ , lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < 
len(_UpperCAmelCase ): try: lowercase__ = word.index(_UpperCAmelCase, _UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase__ = j if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase__ = tuple(_UpperCAmelCase ) lowercase__ = new_word if len(_UpperCAmelCase ) == 1: break else: lowercase__ = get_pairs(_UpperCAmelCase ) lowercase__ = "@@ ".join(_UpperCAmelCase ) lowercase__ = word[:-4] lowercase__ = word return word def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] lowercase__ = re.findall(R"\S+\n?", _UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(_UpperCAmelCase ).split(" " ) ) ) return split_tokens def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.encoder.get(_UpperCAmelCase, self.encoder.get(self.unk_token ) ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.decoder.get(_UpperCAmelCase, self.unk_token ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = " ".join(_UpperCAmelCase ).replace("@@ ", "" ).strip() return out_string def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase__ = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_UpperCAmelCase, "w", encoding="utf-8" ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_UpperCAmelCase, ensure_ascii=_UpperCAmelCase ) + "\n" ) lowercase__ = 0 with open(_UpperCAmelCase, "w", encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) lowercase__ = token_index writer.write(" ".join(_UpperCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
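# A toy walk-through of the merge loop in the `bpe` method above (the two
# merge ranks here are invented for illustration). "hello" with an
# end-of-word marker collapses "l"+"l" first, then "ll"+"o</w>", mirroring
# the rank-driven `while True` loop in the tokenizer.
def toy_get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

word = ("h", "e", "l", "l", "o</w>")
ranks = {("l", "l"): 0, ("ll", "o</w>"): 1}
while True:
    candidates = [p for p in toy_get_pairs(word) if p in ranks]
    if not candidates:
        break
    first, second = min(candidates, key=ranks.get)
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
print(word)  # ('h', 'e', 'llo</w>')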
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase_: Dict = "pt" elif is_tf_available(): lowerCAmelCase_: Dict = "tf" else: lowerCAmelCase_: str = "jax" class a__ ( _a , unittest.TestCase ): snake_case_ = ByTaTokenizer snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() lowercase__ = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case__ ( self ): '''simple docstring''' return ByTaTokenizer.from_pretrained("google/byt5-small" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): try: lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) ) lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) ) if max_length is not None and len(_UpperCAmelCase ) > max_length: lowercase__ = toks[:max_length] if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0: while len(_UpperCAmelCase ) < min_length: lowercase__ = toks + toks # toks_str = [t[1] for t in toks] lowercase__ = [t[0] for t in toks] # Ensure consistency lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase ) if " " not in output_txt and len(_UpperCAmelCase ) > 1: lowercase__ = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase ) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase ) ) if with_prefix_space: lowercase__ = " " + output_txt lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) return output_txt, output_ids def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] ) lowercase__ = tokenizer(["hi", "I went to the gym", ""] ) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = "Unicode €." 
lowercase__ = tokenizer(_UpperCAmelCase ) lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" ) lowercase__ = tokenizer("e è é ê ë" ) lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) if FRAMEWORK != "jax": lowercase__ = list(batch.input_ids.numpy()[0] ) else: lowercase__ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", _UpperCAmelCase ) self.assertIn("attention_mask", _UpperCAmelCase ) self.assertNotIn("decoder_input_ids", _UpperCAmelCase ) self.assertNotIn("decoder_attention_mask", _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = [ "Summary of the text.", "Another summary.", ] lowercase__ = tokenizer( text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertEqual(32, targets["input_ids"].shape[1] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization. </s>"] lowercase__ = ["Summary of the text. 
</s>"] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] ) self.assertEqual(_UpperCAmelCase, batch["labels"][0] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) shutil.rmtree(_UpperCAmelCase ) lowercase__ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) lowercase__ = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )] lowercase__ = added_tokens_extra_ids + [ "an_additional_special_token" ] lowercase__ = added_tokens_extra_ids + [ 
"an_additional_special_token" ] with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )] lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase ) self.assertTrue(tokenizer.decode([255] ) == "" ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] lowercase__ = 0 lowercase__ = tokenizer.convert_ids_to_tokens( _UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) for attr in attributes_list: setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) 
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
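# Where the hard-coded id lists in these tests come from (a hedged note):
# ByT5 maps each UTF-8 byte b to id b + 3, since ids 0..2 are reserved for
# pad/eos/unk, and appends the eos id 1.
def byt5_ids(text: str) -> list[int]:
    return [b + 3 for b in text.encode("utf-8")] + [1]

assert byt5_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]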
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def __a ( A ): '''simple docstring''' return (data["data"], data["target"]) def __a ( A , A , A ): '''simple docstring''' lowercase__ = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(A , A ) # Predict target for test data lowercase__ = xgb.predict(A ) lowercase__ = predictions.reshape(len(A ) , 1 ) return predictions def __a ( ): '''simple docstring''' lowercase__ = fetch_california_housing() lowercase__ , lowercase__ = data_handling(A ) lowercase__ , lowercase__ , lowercase__ , lowercase__ = train_test_split( A , A , test_size=0.25 , random_state=1 ) lowercase__ = xgboost(A , A , A ) # Error printing print(f'''Mean Absolute Error : {mean_absolute_error(A , A )}''' ) print(f'''Mean Square Error : {mean_squared_error(A , A )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class a__ ( unittest.TestCase ): snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 ) lowercase__ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' for example in examples: lowercase__ = video_classifier(_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase, [ {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, ], ) @require_torch def snake_case__ ( self ): '''simple docstring''' lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" lowercase__ = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) lowercase__ = pipeline( "video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 ) lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ) lowercase__ = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [ [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ], ) @require_tf def snake_case__ ( self ): '''simple docstring''' pass
"""simple docstring""" from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass lowerCAmelCase_: Dict = (3, 9, -1_1, 0, 7, 5, 1, -1) lowerCAmelCase_: Optional[int] = (4, 6, 2, 0, 8, 1_0, 3, -2) @dataclass class a__ : snake_case_ = 42 snake_case_ = 42 class a__ : def __init__( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = None for i in sorted(_UpperCAmelCase, reverse=_UpperCAmelCase ): lowercase__ = Node(_UpperCAmelCase, self.head ) def __iter__( self ): '''simple docstring''' lowercase__ = self.head while node: yield node.data lowercase__ = node.next_node def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __str__( self ): '''simple docstring''' return " -> ".join([str(_UpperCAmelCase ) for node in self] ) def __a ( A , A ): '''simple docstring''' return SortedLinkedList(list(A ) + list(A ) ) if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase_: List[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
"""simple docstring""" import itertools import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __a ( ): '''simple docstring''' lowercase__ = 2 while True: if is_prime(A ): yield num num += 1 def __a ( A = 1_00_01 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , A ) ) if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters lowerCAmelCase_: Tuple = (7_2_0, 1_2_8_0) # Height, Width lowerCAmelCase_: List[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it. lowerCAmelCase_: List[Any] = 1 / 1_0_0 lowerCAmelCase_: List[str] = "" lowerCAmelCase_: List[str] = "" lowerCAmelCase_: Optional[int] = "" lowerCAmelCase_: List[str] = 2_5_0 def __a ( ): '''simple docstring''' lowercase__ , lowercase__ = get_dataset(A , A ) for index in range(A ): lowercase__ = random.sample(range(len(A ) ) , 4 ) lowercase__ , lowercase__ , lowercase__ = update_image_and_anno( A , A , A , A , A , filter_scale=A , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' lowercase__ = random_chars(32 ) lowercase__ = path.split(os.sep )[-1].rsplit("." , 1 )[0] lowercase__ = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}''' cva.imwrite(f'''{file_root}.jpg''' , A , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' ) lowercase__ = [] for anno in new_annos: lowercase__ = anno[3] - anno[1] lowercase__ = anno[4] - anno[2] lowercase__ = anno[1] + width / 2 lowercase__ = anno[2] + height / 2 lowercase__ = f'''{anno[0]} {x_center} {y_center} {width} {height}''' annos_list.append(A ) with open(f'''{file_root}.txt''' , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def __a ( A , A ): '''simple docstring''' lowercase__ = [] lowercase__ = [] for label_file in glob.glob(os.path.join(A , "*.txt" ) ): lowercase__ = label_file.split(os.sep )[-1].rsplit("." , 1 )[0] with open(A ) as in_file: lowercase__ = in_file.readlines() lowercase__ = os.path.join(A , f'''{label_name}.jpg''' ) lowercase__ = [] for obj_list in obj_lists: lowercase__ = obj_list.rstrip("\n" ).split(" " ) lowercase__ = float(obj[1] ) - float(obj[3] ) / 2 lowercase__ = float(obj[2] ) - float(obj[4] ) / 2 lowercase__ = float(obj[1] ) + float(obj[3] ) / 2 lowercase__ = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(A ) labels.append(A ) return img_paths, labels def __a ( A , A , A , A , A , A = 0.0 , ): '''simple docstring''' lowercase__ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) lowercase__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) lowercase__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) lowercase__ = int(scale_x * output_size[1] ) lowercase__ = int(scale_y * output_size[0] ) lowercase__ = [] lowercase__ = [] for i, index in enumerate(A ): lowercase__ = all_img_list[index] path_list.append(A ) lowercase__ = all_annos[index] lowercase__ = cva.imread(A ) if i == 0: # top-left lowercase__ = cva.resize(A , (divid_point_x, divid_point_y) ) lowercase__ = img for bbox in img_annos: lowercase__ = bbox[1] * scale_x lowercase__ = bbox[2] * scale_y lowercase__ = bbox[3] * scale_x lowercase__ = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right lowercase__ = cva.resize(A , (output_size[1] - divid_point_x, divid_point_y) ) lowercase__ = img for bbox in img_annos: lowercase__ = scale_x + bbox[1] * (1 - scale_x) lowercase__ = bbox[2] * scale_y lowercase__ = scale_x + bbox[3] * (1 - scale_x) lowercase__ = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left lowercase__ = cva.resize(A , (divid_point_x, output_size[0] - 
divid_point_y) ) lowercase__ = img for bbox in img_annos: lowercase__ = bbox[1] * scale_x lowercase__ = scale_y + bbox[2] * (1 - scale_y) lowercase__ = bbox[3] * scale_x lowercase__ = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right lowercase__ = cva.resize( A , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) lowercase__ = img for bbox in img_annos: lowercase__ = scale_x + bbox[1] * (1 - scale_x) lowercase__ = scale_y + bbox[2] * (1 - scale_y) lowercase__ = scale_x + bbox[3] * (1 - scale_x) lowercase__ = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: lowercase__ = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __a ( A ): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" lowercase__ = ascii_lowercase + digits return "".join(random.choice(A ) for _ in range(A ) ) if __name__ == "__main__": main() print("DONE ✅")
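# The normalized-coordinate remapping used for the four quadrants above, in
# isolation (a hedged sketch of the top-right case). Boxes live in [0, 1],
# so an image placed right of the split at scale_x has its x-range squeezed
# into [scale_x, 1] while y keeps the top share [0, scale_y].
def remap_top_right(xmin, ymin, xmax, ymax, scale_x, scale_y):
    return (
        scale_x + xmin * (1 - scale_x),
        ymin * scale_y,
        scale_x + xmax * (1 - scale_x),
        ymax * scale_y,
    )

print(remap_top_right(0.0, 0.0, 1.0, 1.0, 0.5, 0.5))  # (0.5, 0.0, 1.0, 0.5)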
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( _UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths} lowercase__ = Text( cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, ) def snake_case__ ( self ): '''simple docstring''' if self.streaming: lowercase__ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None self.builder.download_and_prepare( download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, ) lowercase__ = self.builder.as_dataset( split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory ) return dataset
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCAmelCase_: List[str] = 1_6 lowerCAmelCase_: Optional[Any] = 3_2 def __a ( A , A = 16 , A = "bert-base-cased" ): '''simple docstring''' lowercase__ = AutoTokenizer.from_pretrained(A ) lowercase__ = load_dataset("glue" , "mrpc" ) def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ = datasets.map( A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(A , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. lowercase__ = DataLoader( tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A ) lowercase__ = DataLoader( tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A ) return train_dataloader, eval_dataloader def __a ( A , A ): '''simple docstring''' lowercase__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ = config["lr"] lowercase__ = int(config["num_epochs"] ) lowercase__ = int(config["seed"] ) lowercase__ = int(config["batch_size"] ) lowercase__ = args.model_name_or_path set_seed(A ) lowercase__ , lowercase__ = get_dataloaders(A , A , A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A ) # Instantiate optimizer lowercase__ = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ = optimizer_cls(params=model.parameters() , lr=A ) if accelerator.state.deepspeed_plugin is not None: lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: lowercase__ = 1 lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ = get_linear_schedule_with_warmup( optimizer=A , num_warmup_steps=0 , num_training_steps=A , ) else: lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare( A , A , A , A , A ) # We need to keep track of how many total steps we have iterated over lowercase__ = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ = 0 # Now we train the model lowercase__ = evaluate.load("glue" , "mrpc" ) lowercase__ = 0 lowercase__ = {} for epoch in range(A , A ): model.train() for step, batch in enumerate(A ): lowercase__ = model(**A ) lowercase__ = outputs.loss lowercase__ = loss / gradient_accumulation_steps accelerator.backward(A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowercase__ = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ = model(**A ) lowercase__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowercase__ , lowercase__ = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A ) - 1: lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A , references=A , ) lowercase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , A ) lowercase__ = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: lowercase__ = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(A , A ) def __a ( ): '''simple docstring''' lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , ) parser.add_argument( "--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=A , default=3 , help="Number of train epochs." , ) lowercase__ = parser.parse_args() lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(A , A ) if __name__ == "__main__": main()
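# The gradient-accumulation pattern from the training loop above, in
# isolation (a hedged sketch with a stand-in model). Dividing the loss by
# the accumulation factor keeps the effective gradient comparable to one
# large batch; the optimizer steps once per accumulation window.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
gradient_accumulation_steps = 4
for step in range(8):
    micro_batch = torch.randn(2, 4)
    loss = model(micro_batch).pow(2).mean() / gradient_accumulation_steps
    loss.backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()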
"""simple docstring""" from jiwer import compute_measures import datasets lowerCAmelCase_: List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" lowerCAmelCase_: int = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" lowerCAmelCase_: List[Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def snake_case__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence" ), "references": datasets.Value("string", id="sequence" ), } ), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ], ) def snake_case__ ( self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=False ): '''simple docstring''' if concatenate_texts: return compute_measures(_UpperCAmelCase, _UpperCAmelCase )["wer"] else: lowercase__ = 0 lowercase__ = 0 for prediction, reference in zip(_UpperCAmelCase, _UpperCAmelCase ): lowercase__ = compute_measures(_UpperCAmelCase, _UpperCAmelCase ) incorrect += measures["substitutions"] + measures["deletions"] + 
measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _a ): snake_case_ = (IPNDMScheduler,) snake_case_ = (("num_inference_steps", 50),) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = {"num_train_timesteps": 1000} config.update(**_UpperCAmelCase ) return config def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase 
).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = 10 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample return sample def snake_case__ ( self ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ): lowercase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.timesteps[5] lowercase__ = scheduler.timesteps[6] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def snake_case__ ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 254_0529 ) < 10
668
1
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def __a ( A , A , A ): '''simple docstring''' lowercase__ = OmegaConf.load(A ) lowercase__ = torch.load(A , map_location="cpu" )["model"] lowercase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowercase__ = {} lowercase__ = "first_stage_model." for key in keys: if key.startswith(A ): lowercase__ = state_dict[key] # extract state_dict for UNetLDM lowercase__ = {} lowercase__ = "model.diffusion_model." for key in keys: if key.startswith(A ): lowercase__ = state_dict[key] lowercase__ = config.model.params.first_stage_config.params lowercase__ = config.model.params.unet_config.params lowercase__ = VQModel(**A ).eval() vqvae.load_state_dict(A ) lowercase__ = UNetLDMModel(**A ).eval() unet.load_state_dict(A ) lowercase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , ) lowercase__ = LDMPipeline(A , A , A ) pipeline.save_pretrained(A ) if __name__ == "__main__": lowerCAmelCase_: Optional[Any] = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", type=str, required=True) parser.add_argument("--config_path", type=str, required=True) parser.add_argument("--output_path", type=str, required=True) lowerCAmelCase_: Tuple = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
668
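For reference, a hypothetical programmatic invocation of the conversion function above; the paths below are placeholders, not real files:

# Hypothetical invocation sketch; all three paths are placeholders.
convert_ldm_original(
    checkpoint_path="path/to/ldm.ckpt",      # placeholder checkpoint file
    config_path="path/to/ldm_config.yaml",   # placeholder OmegaConf config
    output_path="converted-ldm-pipeline",    # output directory for the saved pipeline
)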
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( _a , unittest.TestCase ): snake_case_ = MgpstrTokenizer snake_case_ = False snake_case_ = {} snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() # fmt: off lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = "tester" lowercase__ = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ), 1 ) lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase ) lowercase__ = tokenizer.tokenize(_UpperCAmelCase ) lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ), 0 ) lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def snake_case__ ( self ): '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def snake_case__ ( self ): '''simple docstring''' pass
668
1
"""simple docstring""" from typing import Any import numpy as np def __a ( A ): '''simple docstring''' return np.array_equal(A , matrix.conjugate().T ) def __a ( A , A ): '''simple docstring''' lowercase__ = v.conjugate().T lowercase__ = v_star.dot(A ) assert isinstance(A , np.ndarray ) return (v_star_dot.dot(A )) / (v_star.dot(A )) def __a ( ): '''simple docstring''' lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) lowercase__ = np.array([[1], [2], [3]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' print(rayleigh_quotient(A , A ) ) lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' assert rayleigh_quotient(A , A ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
668
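A short illustrative sketch of the property the rayleigh_quotient helper above relies on: for a Hermitian matrix, the quotient is real and bounded by the smallest and largest eigenvalues. The matrix and vector below are arbitrary example values, not taken from the file.

import numpy as np

a = np.array([[2.0, 1.0], [1.0, 3.0]])  # real symmetric, hence Hermitian
v = np.array([[1.0], [1.0]])            # arbitrary nonzero vector

v_star = v.conjugate().T
r = (v_star.dot(a).dot(v)) / (v_star.dot(v))  # Rayleigh quotient, as in the file above

eigenvalues = np.linalg.eigvalsh(a)
assert eigenvalues.min() <= r.item() <= eigenvalues.max()
print(r.item())  # 3.5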
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
668
1
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __a ( A ): '''simple docstring''' lowercase__ = 3_84 lowercase__ = 7 if "tiny" in model_name: lowercase__ = 96 lowercase__ = (2, 2, 6, 2) lowercase__ = (3, 6, 12, 24) elif "small" in model_name: lowercase__ = 96 lowercase__ = (2, 2, 18, 2) lowercase__ = (3, 6, 12, 24) elif "base" in model_name: lowercase__ = 1_28 lowercase__ = (2, 2, 18, 2) lowercase__ = (4, 8, 16, 32) lowercase__ = 12 lowercase__ = 5_12 elif "large" in model_name: lowercase__ = 1_92 lowercase__ = (2, 2, 18, 2) lowercase__ = (6, 12, 24, 48) lowercase__ = 12 lowercase__ = 7_68 # set label information lowercase__ = 1_50 lowercase__ = "huggingface/label-files" lowercase__ = "ade20k-id2label.json" lowercase__ = json.load(open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) ) lowercase__ = {int(A ): v for k, v in idalabel.items()} lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = SwinConfig( embed_dim=A , depths=A , num_heads=A , window_size=A , out_features=["stage1", "stage2", "stage3", "stage4"] , ) lowercase__ = UperNetConfig( backbone_config=A , auxiliary_in_channels=A , num_labels=A , idalabel=A , labelaid=A , ) return config def __a ( A ): '''simple docstring''' lowercase__ = [] # fmt: off # stem rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def __a ( A , A , A ): '''simple docstring''' lowercase__ = dct.pop(A ) lowercase__ = val def __a ( A , A ): '''simple docstring''' lowercase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowercase__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowercase__ = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) lowercase__ = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase__ = in_proj_weight[:dim, :] lowercase__ = in_proj_bias[: dim] lowercase__ = in_proj_weight[ dim : dim * 2, : ] lowercase__ = in_proj_bias[ dim : dim * 2 ] lowercase__ = in_proj_weight[ -dim :, : ] lowercase__ = in_proj_bias[-dim :] # fmt: on def __a ( A ): '''simple docstring''' lowercase__ , lowercase__ = x.shape lowercase__ = x.reshape(A , 4 , in_channel // 4 ) lowercase__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(A , A ) return x def __a ( A ): '''simple docstring''' lowercase__ , lowercase__ = x.shape lowercase__ = x.reshape(A , in_channel // 4 , 4 ) lowercase__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(A , A ) return x def __a ( A ): '''simple docstring''' lowercase__ = x.shape[0] lowercase__ = x.reshape(4 , in_channel // 4 ) lowercase__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(A ) return x def __a ( A ): '''simple docstring''' lowercase__ = x.shape[0] lowercase__ = x.reshape(in_channel // 4 , 4 ) lowercase__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(A ) return x def __a ( A , A , A ): '''simple docstring''' lowercase__ = { "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth", "upernet-swin-small": 
"https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth", "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth", "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth", } lowercase__ = model_name_to_url[model_name] lowercase__ = torch.hub.load_state_dict_from_url(A , map_location="cpu" , file_name=A )[ "state_dict" ] for name, param in state_dict.items(): print(A , param.shape ) lowercase__ = get_upernet_config(A ) lowercase__ = UperNetForSemanticSegmentation(A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowercase__ = state_dict.pop(A ) if "bn" in key: lowercase__ = key.replace("bn" , "batch_norm" ) lowercase__ = val # rename keys lowercase__ = create_rename_keys(A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_q_k_v(A , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: lowercase__ = reverse_correct_unfold_reduction_order(A ) if "norm" in key: lowercase__ = reverse_correct_unfold_norm_order(A ) model.load_state_dict(A ) # verify on image lowercase__ = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" lowercase__ = Image.open(requests.get(A , stream=A ).raw ).convert("RGB" ) lowercase__ = SegformerImageProcessor() lowercase__ = processor(A , return_tensors="pt" ).pixel_values with torch.no_grad(): lowercase__ = model(A ) lowercase__ = outputs.logits print(logits.shape ) print("First values of logits:" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": lowercase__ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": lowercase__ = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": lowercase__ = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": lowercase__ = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("Logits:" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , A , atol=1e-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(A ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": lowerCAmelCase_: Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[F'upernet-swin-{size}' for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCAmelCase_: int = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
668
"""simple docstring""" from typing import Any import numpy as np def __a ( A ): '''simple docstring''' return np.array_equal(A , matrix.conjugate().T ) def __a ( A , A ): '''simple docstring''' lowercase__ = v.conjugate().T lowercase__ = v_star.dot(A ) assert isinstance(A , np.ndarray ) return (v_star_dot.dot(A )) / (v_star.dot(A )) def __a ( ): '''simple docstring''' lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) lowercase__ = np.array([[1], [2], [3]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' print(rayleigh_quotient(A , A ) ) lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' assert rayleigh_quotient(A , A ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
668
1
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=_a ) class a__ ( _a ): snake_case_ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} ) snake_case_ = Features({"text": Value("string" )} ) snake_case_ = Features({} ) snake_case_ = "text" @property def snake_case__ ( self ): '''simple docstring''' return {self.text_column: "text"}
668
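Illustrative sketch, assuming the task template above is in scope as LanguageModeling: column_mapping reports which dataset column holds the text.

template = LanguageModeling(text_column="content")  # "content" is an arbitrary example column name
print(template.column_mapping)  # {'content': 'text'}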
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class a__ ( _a , unittest.TestCase ): snake_case_ = PriorTransformer snake_case_ = "hidden_states" @property def snake_case__ ( self ): '''simple docstring''' lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) def snake_case__ ( self ): '''simple docstring''' lowercase__ = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } lowercase__ = self.dummy_input return init_dict, inputs_dict def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ), 0 ) model.to(_UpperCAmelCase ) lowercase__ = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common() lowercase__ = self.model_class(**_UpperCAmelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ) lowercase__ = model.to(_UpperCAmelCase ) if hasattr(_UpperCAmelCase, "set_default_attn_processor" ): model.set_default_attn_processor() lowercase__ = self.get_dummy_seed_input() with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] lowercase__ = output[0, :5].flatten().cpu() print(_UpperCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) ) @slow class a__ ( unittest.TestCase ): def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = batch_size lowercase__ = embedding_dim lowercase__ = num_embeddings lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" ) model.to(_UpperCAmelCase ) lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase ) with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] assert list(sample.shape ) == [1, 768] lowercase__ = sample[0, :8].flatten().cpu() print(_UpperCAmelCase ) lowercase__ = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
668
1
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class a__ ( unittest.TestCase ): snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 ) lowercase__ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' for example in examples: lowercase__ = video_classifier(_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase, [ {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, ], ) @require_torch def snake_case__ ( self ): '''simple docstring''' lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" lowercase__ = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) lowercase__ = pipeline( "video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 ) lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ) lowercase__ = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [ [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ], ) @require_tf def snake_case__ ( self ): '''simple docstring''' pass
668
"""simple docstring""" lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def __a ( A ): '''simple docstring''' if not isinstance(A , A ): lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ = len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ = b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ = b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def __a ( A ): '''simple docstring''' if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ = ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ = encoded_data[:-padding] lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
668
1
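A minimal round-trip sketch for the file above, assuming base64_encode and base64_decode are in scope; it cross-checks the hand-rolled codec against the standard library.

import base64 as stdlib_base64

payload = b"Hello, Base64!"
encoded = base64_encode(payload)
assert encoded == stdlib_base64.b64encode(payload)  # agrees with the stdlib encoder
assert base64_decode(encoded.decode()) == payload   # decoding restores the original bytes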
"""simple docstring""" import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase_: List[str] = logging.get_logger(__name__) lowerCAmelCase_: Union[str, Any] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} lowerCAmelCase_: int = { "vocab_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", }, "emoji_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", }, } lowerCAmelCase_: Dict = { "abeja/gpt-neox-japanese-2.7b": 2_0_4_8, } def __a ( A , A ): '''simple docstring''' with open(A , "r" , encoding="utf-8" ) as f: lowercase__ = json.loads(f.read() ) lowercase__ = collections.OrderedDict() lowercase__ = collections.OrderedDict() lowercase__ = collections.OrderedDict() with open(A , "r" , encoding="utf-8" ) as f: lowercase__ = f.readlines() lowercase__ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(A ): lowercase__ = b lowercase__ = idx for wd in b: lowercase__ = idx return vocab, raw_vocab, ids_to_tokens, emoji class a__ ( _a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="<|endoftext|>", _UpperCAmelCase="<|endoftext|>", _UpperCAmelCase="<|startoftext|>", _UpperCAmelCase="<|endoftext|>", _UpperCAmelCase=False, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( unk_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, do_clean_text=_UpperCAmelCase, **_UpperCAmelCase, ) if not os.path.isfile(_UpperCAmelCase ): raise ValueError( F'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained''' " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(_UpperCAmelCase ): raise ValueError( F'''Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google''' " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) lowercase__ = do_clean_text lowercase__ , lowercase__ , lowercase__ , lowercase__ = load_vocab_and_emoji(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = SubWordJapaneseTokenizer( vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji ) @property def snake_case__ ( self ): '''simple docstring''' return len(self.raw_vocab ) def snake_case__ ( self ): '''simple docstring''' return dict(self.raw_vocab, **self.added_tokens_encoder ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.subword_tokenizer.tokenize(_UpperCAmelCase, clean=self.do_clean_text ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.vocab.get(_UpperCAmelCase, self.vocab.get(self.unk_token ) ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = "".join(_UpperCAmelCase ).strip() return out_string def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) + [self.eos_token_id] ) if len(_UpperCAmelCase ) > self.model_max_length: lowercase__ = input_ids[-self.model_max_length :] return input_ids def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = 0 if os.path.isdir(_UpperCAmelCase ): lowercase__ = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: lowercase__ = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(_UpperCAmelCase, "w", encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" 
) lowercase__ = token_index writer.write(",".join(_UpperCAmelCase ) + "\n" ) index += 1 with open(_UpperCAmelCase, "w", encoding="utf-8" ) as writer: json.dump(self.emoji, _UpperCAmelCase ) return vocab_file, emoji_file class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = vocab # same as swe lowercase__ = ids_to_tokens # same as bpe lowercase__ = emoji lowercase__ = np.max([len(_UpperCAmelCase ) for w in self.vocab.keys()] ) lowercase__ = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) lowercase__ = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) lowercase__ = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) lowercase__ = re.compile( R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) lowercase__ = re.compile( R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) lowercase__ = re.compile( R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) lowercase__ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" lowercase__ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" lowercase__ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self ): '''simple docstring''' return len(self.ids_to_tokens ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = self.content_repattera.sub("<URL>", _UpperCAmelCase ) lowercase__ = self.content_repattera.sub("<EMAIL>", _UpperCAmelCase ) lowercase__ = self.content_repattera.sub("<TEL>", _UpperCAmelCase ) lowercase__ = self.content_repattera.sub("<DATE>", _UpperCAmelCase ) lowercase__ = self.content_repattera.sub("<DATE>", _UpperCAmelCase ) lowercase__ = self.content_repattera.sub("<PRICE>", _UpperCAmelCase ) lowercase__ = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: lowercase__ = content.replace("<BLOCK><BLOCK>", "<BLOCK>" ) return content def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False ): '''simple docstring''' lowercase__ = text.replace(" ", "<SP>" ) lowercase__ = text.replace(" ", "<SP>" ) lowercase__ = text.replace("\r\n", "<BR>" ) lowercase__ = text.replace("\n", "<BR>" ) lowercase__ = text.replace("\r", "<BR>" ) lowercase__ = text.replace("\t", "<TAB>" ) lowercase__ = text.replace("—", "ー" ) lowercase__ = text.replace("−", "ー" ) for k, v in self.emoji["emoji"].items(): if k in text: lowercase__ = text.replace(_UpperCAmelCase, _UpperCAmelCase ) if clean: lowercase__ = self.clean_text(_UpperCAmelCase ) def check_simbol(_UpperCAmelCase ): lowercase__ = x.encode() if len(_UpperCAmelCase ) == 1 and len(_UpperCAmelCase ) == 2: lowercase__ = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0Xc2a1 and c <= 0Xc2bf) or (c >= 0Xc780 and c <= 0Xc783) or (c >= 0Xcab9 and c <= 0Xcbbf) or (c >= 0Xcc80 and c <= 0Xcda2) ): return True return False def checkuae(_UpperCAmelCase ): lowercase__ = x.encode() if len(_UpperCAmelCase ) == 1 and len(_UpperCAmelCase ) == 3: lowercase__ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe2_8080 and c <= 0Xe2_b07f: return True 
return False lowercase__ = 0 lowercase__ = [] while pos < len(_UpperCAmelCase ): lowercase__ = min(len(_UpperCAmelCase ), pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3 lowercase__ = [] # (token_id, token, pos) for e in range(_UpperCAmelCase, _UpperCAmelCase, -1 ): lowercase__ = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(_UpperCAmelCase ) > 2: lowercase__ = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(_UpperCAmelCase ) > 0: # the smallest token_id is adopted lowercase__ , lowercase__ , lowercase__ = sorted(_UpperCAmelCase, key=lambda _UpperCAmelCase : x[0] )[0] result.append(_UpperCAmelCase ) lowercase__ = e else: lowercase__ = pos + 1 lowercase__ = text[pos:end] if check_simbol(_UpperCAmelCase ): result.append("<KIGOU>" ) elif checkuae(_UpperCAmelCase ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) lowercase__ = end return result def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase="\n" ): '''simple docstring''' lowercase__ = [] lowercase__ = [] lowercase__ = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(_UpperCAmelCase ) > 0: words.append(bytearray(_UpperCAmelCase ).decode("utf-8", errors="replace" ) ) lowercase__ = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(_UpperCAmelCase ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: words.append(bytearray(_UpperCAmelCase ).decode("utf-8", errors="replace" ) ) lowercase__ = "".join(_UpperCAmelCase ) return text
668
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ): '''simple docstring''' lowercase__ = symbols(A ) lowercase__ = lambdify(A , A ) lowercase__ = lambdify(A , diff(A , A ) ) lowercase__ = starting_point while True: if diff_function(A ) != 0: lowercase__ = prev_guess - multiplicity * func(A ) / diff_function( A ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase__ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial # Find fourth Root of 5 print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}') # Find value of e print( "The root of log(y) - 1 = 0 is ", F'{newton_raphson("log(y) - 1", 2, variable="y")}', ) # Exponential Roots print( "The root of exp(x) - 1 = 0 is", F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}', ) # Find root of cos(x) print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
668
1
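Usage sketch, assuming the newton_raphson function above is in scope: the positive root of x**2 - 2 = 0 is the square root of 2.

root = newton_raphson("x**2 - 2", 1.5)
assert abs(root**2 - 2) < 1e-9
print(root)  # ~1.4142135623730951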
"""simple docstring""" from __future__ import annotations class a__ : def __init__( self, _UpperCAmelCase = 0 ): '''simple docstring''' lowercase__ = key def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCAmelCase ) ^ key ) for ch in content] def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCAmelCase ) ^ key ) for ch in content] def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = 0 ): '''simple docstring''' assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned lowercase__ = "" for ch in content: ans += chr(ord(_UpperCAmelCase ) ^ key ) return ans def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = 0 ): '''simple docstring''' assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned lowercase__ = "" for ch in content: ans += chr(ord(_UpperCAmelCase ) ^ key ) return ans def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = 0 ): '''simple docstring''' assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase ) try: with open(_UpperCAmelCase ) as fin, open("encrypt.out", "w+" ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(_UpperCAmelCase, _UpperCAmelCase ) ) except OSError: return False return True def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase ) try: with open(_UpperCAmelCase ) as fin, open("decrypt.out", "w+" ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(_UpperCAmelCase, _UpperCAmelCase ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
668
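Round-trip sketch, assuming the XORCipher class above is in scope; XOR with the same key is its own inverse, so decrypting the ciphertext restores the plaintext.

cipher = XORCipher(key=67)
ciphertext = cipher.encrypt_string("hallo welt", 67)
assert cipher.decrypt_string(ciphertext, 67) == "hallo welt"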
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_: Union[str, Any] = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Any = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Tuple = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Optional[Any] = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase_: Optional[int] = 
_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
668
1
"""simple docstring""" import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") lowerCAmelCase_: int = logging.getLogger(__name__) @dataclass class a__ : snake_case_ = field( default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) snake_case_ = field( default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , ) snake_case_ = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) snake_case_ = field( default=_a , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) snake_case_ = field( default=_a , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) snake_case_ = field( default=_a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) snake_case_ = field( default=_a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) snake_case_ = field( default=_a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) snake_case_ = field( default=_a , metadata={"help": "A csv or a json file containing the training data."} ) snake_case_ = field( default=_a , metadata={"help": "A csv or a json file containing the validation data."} ) snake_case_ = field(default=_a , metadata={"help": "A csv or a json file containing the test data."} ) def snake_case__ ( self ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: lowercase__ = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." lowercase__ = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class a__ : snake_case_ = field( default=_a , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) snake_case_ = field( default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) snake_case_ = field( default=_a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) snake_case_ = field( default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) snake_case_ = field( default=_a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) snake_case_ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) snake_case_ = field( default=_a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) def __a ( ): '''simple docstring''' lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) lowercase__ = training_args.get_process_log_level() logger.setLevel(A ) datasets.utils.logging.set_verbosity(A ) transformers.utils.logging.set_verbosity(A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. lowercase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. 
# # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowercase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. lowercase__ = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: lowercase__ = data_args.train_file.split("." )[-1] lowercase__ = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." lowercase__ = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." ) for key in data_files.keys(): logger.info(f'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files lowercase__ = load_dataset("csv" , data_files=A , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files lowercase__ = load_dataset("json" , data_files=A , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels lowercase__ = raw_datasets["train"].features["label"].names lowercase__ = len(A ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer lowercase__ = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=A , ) lowercase__ = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: lowercase__ = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch lowercase__ = False # Some models have set the order of the labels to use, so let's make sure we do use it. 
lowercase__ = {"Refused": 0, "Entailed": 1} lowercase__ = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) lowercase__ = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(A ): # Tokenize the texts def _convert_table_text_to_pandas(A ): lowercase__ = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] lowercase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd lowercase__ = examples["statement"] lowercase__ = list(map(_convert_table_text_to_pandas , examples["table_text"] ) ) lowercase__ = tokenizer(A , A , padding=A , max_length=A , truncation=A ) lowercase__ = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): lowercase__ = raw_datasets.map( A , batched=A , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) lowercase__ = raw_datasets["train"] if data_args.max_train_samples is not None: lowercase__ = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) lowercase__ = raw_datasets["validation"] if data_args.max_eval_samples is not None: lowercase__ = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) lowercase__ = raw_datasets["test"] if data_args.max_predict_samples is not None: lowercase__ = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(A ) ) , 3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(A ): lowercase__ = p.predictions[0] if isinstance(p.predictions , A ) else p.predictions lowercase__ = np.argmax(A , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: lowercase__ = default_data_collator elif training_args.fpaa: lowercase__ = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) else: lowercase__ = None # Initialize our Trainer lowercase__ = Trainer( model=A , args=A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=A , tokenizer=A , data_collator=A , ) # Training if training_args.do_train: lowercase__ = None if training_args.resume_from_checkpoint is not None: lowercase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase__ = last_checkpoint lowercase__ = trainer.train(resume_from_checkpoint=A ) lowercase__ = train_result.metrics lowercase__ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(A ) ) lowercase__ = min(A , len(A ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , A ) trainer.save_metrics("train" , A ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) lowercase__ = trainer.evaluate(eval_dataset=A ) lowercase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A ) lowercase__ = min(A , len(A ) ) trainer.log_metrics("eval" , A ) trainer.save_metrics("eval" , A ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. lowercase__ = predict_dataset.remove_columns("label" ) lowercase__ = trainer.predict(A , metric_key_prefix="predict" ).predictions lowercase__ = np.argmax(A , axis=1 ) lowercase__ = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(A , "w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(A ): lowercase__ = label_list[item] writer.write(f'''{index}\t{item}\n''' ) lowercase__ = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**A ) else: trainer.create_model_card(**A ) def __a ( A ): '''simple docstring''' main() if __name__ == "__main__": main()
668
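A quick illustration of the `#`-delimited table parsing that the fine-tuning script above applies to TabFact's `table_text` field (the inner `_convert_table_text_to_pandas` helper). The sample table is made up for the example; only `pandas` is required:

import pandas as pd

# first row holds the headers, later rows hold the cells, "#" separates columns
table_text = "city#population\nParis#2100000\nBerlin#3700000"
table_content = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(table_content[1:], columns=table_content[0])
print(table)
#      city population
# 0   Paris    2100000
# 1  Berlin    3700000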
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = ["audio_values", "audio_mask"] def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = spectrogram_length lowercase__ = num_channels lowercase__ = patch_size lowercase__ = feature_size // self.patch_size[1] lowercase__ = n_fft lowercase__ = sampling_rate // hop_length_to_sampling_rate lowercase__ = sampling_rate lowercase__ = padding_value lowercase__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = spectrogram( _UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, ) lowercase__ = log_spec[:, :-1] lowercase__ = log_spec - 20.0 lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ = is_batched_numpy or ( isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ): lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa ) elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowercase__ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], _UpperCAmelCase ): lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowercase__ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowercase__ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa ) # convert into correct format for padding lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowercase__ = padded_audio_features * self.padding_value for i in range(len(_UpperCAmelCase ) ): lowercase__ = audio_features[i] lowercase__ = feature # return as BatchFeature if return_attention_mask: lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: lowercase__ = {"audio_values": padded_audio_features} lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase ) return encoded_inputs
668
1
"""simple docstring""" import os from distutils.util import strtobool def __a ( A , A ): '''simple docstring''' for e in env_keys: lowercase__ = int(os.environ.get(A , -1 ) ) if val >= 0: return val return default def __a ( A , A=False ): '''simple docstring''' lowercase__ = os.environ.get(A , str(A ) ) return strtobool(A ) == 1 # As its name indicates `strtobool` actually returns an int... def __a ( A , A="no" ): '''simple docstring''' lowercase__ = os.environ.get(A , str(A ) ) return value
668
"""simple docstring""" from __future__ import annotations import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def __a ( A ): '''simple docstring''' if not isinstance(A , A ): raise ValueError("n must be an integer" ) if n <= 0: raise ValueError("n must be >= 0" ) lowercase__ = [] for num in range(len(A ) ): lowercase__ = 0 while 2 * i * i <= odd_composites[num]: lowercase__ = odd_composites[num] - 2 * i * i if is_prime(A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(A ) == n: return list_nums return [] def __a ( ): '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
668
1
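A sanity check on the Goldbach helpers above (test values chosen by me; 5777 and 5993 are the well-known smallest odd composites that cannot be written as a prime plus twice a square):

# both asserts assume the de-obfuscated names restored above
assert compute_nums(2) == [5777, 5993]
assert solution() == 5777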
"""simple docstring""" from math import isqrt def __a ( A ): '''simple docstring''' return all(number % divisor != 0 for divisor in range(2 , isqrt(A ) + 1 ) ) def __a ( A = 10**6 ): '''simple docstring''' lowercase__ = 0 lowercase__ = 1 lowercase__ = 7 while prime_candidate < max_prime: primes_count += is_prime(A ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'{solution() = }')
668
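The candidate sequence in `solution` above enumerates differences of consecutive cubes, since (n + 1)**3 - n**3 = 3*n*n + 3*n + 1 yields 7, 19, 37, 61, ... and consecutive terms differ by 6 * (n + 1), which is exactly what `prime_candidate += 6 * cube_index` produces. A small self-check (my own illustration):

candidates = []
prime_candidate, cube_index = 7, 1
while len(candidates) < 4:
    candidates.append(prime_candidate)
    cube_index += 1
    prime_candidate += 6 * cube_index
assert candidates == [7, 19, 37, 61]
assert all(c == (n + 1) ** 3 - n**3 for n, c in enumerate(candidates, start=1))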
"""simple docstring""" import os import sys lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) lowerCAmelCase_: Union[str, Any] = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoConfig.from_pretrained(*A , **A ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A , **A ) @add_start_docstrings(AutoModel.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModel.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
668
1
"""simple docstring""" from math import isqrt def __a ( A ): '''simple docstring''' lowercase__ = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , A , A ): lowercase__ = False return [i for i in range(2 , A ) if is_prime[i]] def __a ( A = 10**8 ): '''simple docstring''' lowercase__ = calculate_prime_numbers(max_number // 2 ) lowercase__ = 0 lowercase__ = 0 lowercase__ = len(A ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F'{solution() = }')
668
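A miniature trace of the two-pointer count in `solution` above, using a toy bound of 30 (my own example). A semiprime below 30 needs both prime factors below 30 // 2 = 15:

primes = [2, 3, 5, 7, 11, 13]
count, left, right = 0, 0, len(primes) - 1
while left <= right:
    while primes[left] * primes[right] >= 30:
        right -= 1
    count += right - left + 1  # primes[left] pairs with each of primes[left..right]
    left += 1
print(count)  # 10, i.e. {4, 6, 9, 10, 14, 15, 21, 22, 25, 26}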
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class a__ ( unittest.TestCase ): @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ): lowercase__ = FlaxAutoModel.from_pretrained("bert-base" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
668
1
"""simple docstring""" def __a ( A ): '''simple docstring''' lowercase__ = False while is_sorted is False: # Until all the indices are traversed keep looping lowercase__ = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: lowercase__ , lowercase__ = input_list[i + 1], input_list[i] # swapping if elements not in order lowercase__ = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: lowercase__ , lowercase__ = input_list[i + 1], input_list[i] # swapping if elements not in order lowercase__ = False return input_list if __name__ == "__main__": print("Enter list to be sorted") lowerCAmelCase_: Tuple = [int(x) for x in input().split()] # inputing elements of the list in one line lowerCAmelCase_: List[Any] = odd_even_sort(input_list) print("The sorted list is") print(sorted_list)
668
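Example run of the sort above (input of my own choosing). Because each pass only compares disjoint adjacent pairs, odd-even transposition sort parallelizes well, though it remains O(n**2) when run sequentially:

print(odd_even_sort([5, 3, 8, 1, 9, 2]))  # [1, 2, 3, 5, 8, 9]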
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_: str = logging.get_logger(__name__) lowerCAmelCase_: List[Any] = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class a__ ( _a ): snake_case_ = "data2vec-vision" def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ): '''simple docstring''' super().__init__(**_UpperCAmelCase ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = use_mask_token lowercase__ = use_absolute_position_embeddings lowercase__ = use_relative_position_bias lowercase__ = use_shared_relative_position_bias lowercase__ = layer_scale_init_value lowercase__ = drop_path_rate lowercase__ = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__ = out_indices lowercase__ = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__ = use_auxiliary_head lowercase__ = auxiliary_loss_weight lowercase__ = auxiliary_channels lowercase__ = auxiliary_num_convs lowercase__ = auxiliary_concat_input lowercase__ = semantic_loss_ignore_index class a__ ( _a ): snake_case_ = version.parse("1.11" ) @property def snake_case__ ( self ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def snake_case__ ( self ): '''simple docstring''' return 1E-4
668
1
"""simple docstring""" import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def __a ( A ): '''simple docstring''' return EnvironmentCommand() class a__ ( _a ): @staticmethod def snake_case__ ( _UpperCAmelCase ): '''simple docstring''' lowercase__ = parser.add_parser("env" ) download_parser.set_defaults(func=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = huggingface_hub.__version__ lowercase__ = "not installed" lowercase__ = "NA" if is_torch_available(): import torch lowercase__ = torch.__version__ lowercase__ = torch.cuda.is_available() lowercase__ = "not installed" if is_transformers_available(): import transformers lowercase__ = transformers.__version__ lowercase__ = "not installed" if is_accelerate_available(): import accelerate lowercase__ = accelerate.__version__ lowercase__ = "not installed" if is_xformers_available(): import xformers lowercase__ = xformers.__version__ lowercase__ = { "`diffusers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "PyTorch version (GPU?)": F'''{pt_version} ({pt_cuda_available})''', "Huggingface_hub version": hub_version, "Transformers version": transformers_version, "Accelerate version": accelerate_version, "xFormers version": xformers_version, "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(_UpperCAmelCase ) ) return info @staticmethod def snake_case__ ( _UpperCAmelCase ): '''simple docstring''' return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
668
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_: List[Any] = logging.get_logger(__name__) lowerCAmelCase_: int = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class a__ ( _a ): snake_case_ = "markuplm" def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout # additional properties lowercase__ = max_depth lowercase__ = max_xpath_tag_unit_embeddings lowercase__ = max_xpath_subs_unit_embeddings lowercase__ = tag_pad_id lowercase__ = subs_pad_id lowercase__ = xpath_unit_hidden_size
668
1
"""simple docstring""" import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __a ( A ): '''simple docstring''' if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class a__ ( nn.Module ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' super().__init__() lowercase__ = module lowercase__ = nn.Sequential( nn.Linear(module.in_features, _UpperCAmelCase, bias=_UpperCAmelCase ), nn.Linear(_UpperCAmelCase, module.out_features, bias=_UpperCAmelCase ), ) lowercase__ = (2.0 / (5 * min(module.in_features, module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=_UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def snake_case__ ( self, _UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return self.module(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class a__ ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module snake_case_ = "bigscience/bloom-1b7" # Constant values snake_case_ = 2.109659552692574 snake_case_ = "Hello my name is" snake_case_ = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) snake_case_ = 10 def snake_case__ ( self ): '''simple docstring''' lowercase__ = AutoTokenizer.from_pretrained(self.model_name ) class a__ ( _a ): def snake_case__ ( self ): '''simple docstring''' super().setUp() # Models and tokenizer lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.floataa, device_map="auto" ) lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) def snake_case__ ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_abit.config self.assertTrue(hasattr(_UpperCAmelCase, "quantization_config" ) ) lowercase__ = config.to_dict() lowercase__ = config.to_diff_dict() lowercase__ = config.to_json_string() def snake_case__ ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit lowercase__ = self.model_fpaa.get_memory_footprint() lowercase__ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE ) lowercase__ = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def snake_case__ ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_UpperCAmelCase, torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ) lowercase__ = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=_UpperCAmelCase ), self.EXPECTED_OUTPUTS ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = BitsAndBytesConfig() lowercase__ = True lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=_UpperCAmelCase, device_map="auto" ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ) lowercase__ = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=_UpperCAmelCase ), self.EXPECTED_OUTPUTS ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = BitsAndBytesConfig() with self.assertRaises(_UpperCAmelCase ): lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=_UpperCAmelCase, load_in_abit=_UpperCAmelCase, device_map="auto", bnb_abit_quant_type="nf4", ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaises(_UpperCAmelCase ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ) lowercase__ = self.model_fpaa.to(torch.floataa ) lowercase__ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 ) # Check this does not throw an error lowercase__ = self.model_fpaa.to("cpu" ) # Check this does not throw an error lowercase__ = self.model_fpaa.half() # Check this does not throw an error lowercase__ = self.model_fpaa.float() def snake_case__ ( self ): '''simple docstring''' lowercase__ = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_abit=_UpperCAmelCase, device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class a__ ( unittest.TestCase ): @classmethod def snake_case__ ( cls ): '''simple docstring''' lowercase__ = "t5-small" lowercase__ = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense lowercase__ = AutoTokenizer.from_pretrained(cls.model_name ) lowercase__ = "Translate in German: Hello, my dog is cute" def snake_case__ ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def snake_case__ ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration lowercase__ = TaForConditionalGeneration._keep_in_fpaa_modules lowercase__ = None # test with `t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 ) lowercase__ = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 ) lowercase__ = model.generate(**_UpperCAmelCase ) lowercase__ = modules def snake_case__ ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit ) ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 ) lowercase__ = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 ) lowercase__ = model.generate(**_UpperCAmelCase ) class a__ ( _a ): def snake_case__ ( self ): '''simple docstring''' super().setUp() # model_name lowercase__ = "bigscience/bloom-560m" lowercase__ = "t5-small" # Different types of model lowercase__ = AutoModel.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) # Sequence classification model lowercase__ = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) # 
CausalLM model lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) # Seq2seq model lowercase__ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name, load_in_abit=_UpperCAmelCase, device_map="auto" ) def snake_case__ ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def snake_case__ ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class a__ ( _a ): def snake_case__ ( self ): '''simple docstring''' super().setUp() def snake_case__ ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def snake_case__ ( self ): '''simple docstring''' lowercase__ = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass lowercase__ = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class a__ ( _a ): def snake_case__ ( self ): '''simple docstring''' super().setUp() def snake_case__ ( self ): '''simple docstring''' lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_abit=_UpperCAmelCase, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ), {0, 1} ) # Check that inference pass works on the model lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ) # Second real batch lowercase__ = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=_UpperCAmelCase ), self.EXPECTED_OUTPUTS ) class a__ ( _a ): def snake_case__ ( self ): '''simple docstring''' lowercase__ = "facebook/opt-350m" super().setUp() def snake_case__ ( self ): '''simple docstring''' if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ), {torch.cuda.current_device()} ) for param in model.parameters(): lowercase__ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability lowercase__ = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_UpperCAmelCase ) ): lowercase__ = LoRALayer(module.q_proj, rank=16 ) lowercase__ = LoRALayer(module.k_proj, rank=16 ) lowercase__ = LoRALayer(module.v_proj, rank=16 ) # Step 3: dummy batch lowercase__ = self.tokenizer("Test batch ", return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowercase__ = model.forward(**_UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_UpperCAmelCase, _UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_UpperCAmelCase, nn.Embedding ): self.assertTrue(module.weight.grad is None ) class a__ ( _a ): snake_case_ = "gpt2-xl" snake_case_ = 3.3191854854152187
668
"""simple docstring""" lowerCAmelCase_: Union[str, Any] = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] lowerCAmelCase_: List[str] = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] lowerCAmelCase_: List[str] = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] lowerCAmelCase_: Dict = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] lowerCAmelCase_: Optional[int] = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] lowerCAmelCase_: Tuple = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] lowerCAmelCase_: str = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] lowerCAmelCase_: int = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
668
1
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class a__ ( _a ): snake_case_ = "char" snake_case_ = "bpe" snake_case_ = "wp" lowerCAmelCase_: str = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class a__ ( _a ): snake_case_ = ["image_processor", "char_tokenizer"] snake_case_ = "ViTImageProcessor" snake_case_ = "MgpstrTokenizer" def __init__( self, _UpperCAmelCase=None, _UpperCAmelCase=None, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", _UpperCAmelCase, ) lowercase__ = kwargs.pop("feature_extractor" ) lowercase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) lowercase__ = tokenizer lowercase__ = AutoTokenizer.from_pretrained("gpt2" ) lowercase__ = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(_UpperCAmelCase, _UpperCAmelCase ) def __call__( self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, **_UpperCAmelCase ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: lowercase__ = self.image_processor(_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase ) if text is not None: lowercase__ = self.char_tokenizer(_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: lowercase__ = encodings["input_ids"] return inputs def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ = sequences lowercase__ = char_preds.size(0 ) lowercase__ , lowercase__ = self._decode_helper(_UpperCAmelCase, "char" ) lowercase__ , lowercase__ = self._decode_helper(_UpperCAmelCase, "bpe" ) lowercase__ , lowercase__ = self._decode_helper(_UpperCAmelCase, "wp" ) lowercase__ = [] lowercase__ = [] for i in range(_UpperCAmelCase ): lowercase__ = [char_scores[i], bpe_scores[i], wp_scores[i]] lowercase__ = [char_strs[i], bpe_strs[i], wp_strs[i]] lowercase__ = scores.index(max(_UpperCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) lowercase__ = {} lowercase__ = final_strs lowercase__ = final_scores lowercase__ = char_strs lowercase__ = bpe_strs lowercase__ = wp_strs return out def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' if format == DecodeType.CHARACTER: lowercase__ = self.char_decode lowercase__ = 1 lowercase__ = "[s]" elif format == DecodeType.BPE: lowercase__ = self.bpe_decode lowercase__ = 2 lowercase__ = "#" elif format == DecodeType.WORDPIECE: lowercase__ = self.wp_decode lowercase__ = 102 lowercase__ = "[SEP]" else: raise ValueError(F'''Format {format} is not supported.''' ) lowercase__ , lowercase__ = [], [] lowercase__ = pred_logits.size(0 ) lowercase__ = pred_logits.size(1 ) lowercase__ , lowercase__ = pred_logits.topk(1, dim=-1, largest=_UpperCAmelCase, sorted=_UpperCAmelCase ) lowercase__ = preds_index.view(-1, _UpperCAmelCase 
)[:, 1:] lowercase__ = decoder(_UpperCAmelCase ) lowercase__ , lowercase__ = torch.nn.functional.softmax(_UpperCAmelCase, dim=2 ).max(dim=2 ) lowercase__ = preds_max_prob[:, 1:] for index in range(_UpperCAmelCase ): lowercase__ = preds_str[index].find(_UpperCAmelCase ) lowercase__ = preds_str[index][:pred_eos] lowercase__ = preds_index[index].cpu().tolist() lowercase__ = pred_index.index(_UpperCAmelCase ) if eos_token in pred_index else -1 lowercase__ = preds_max_prob[index][: pred_eos_index + 1] lowercase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_UpperCAmelCase ) conf_scores.append(_UpperCAmelCase ) return dec_strs, conf_scores def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [seq.replace(" ", "" ) for seq in self.char_tokenizer.batch_decode(_UpperCAmelCase )] return decode_strs def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [seq.replace(" ", "" ) for seq in self.wp_tokenizer.batch_decode(_UpperCAmelCase )] return decode_strs
668
"""simple docstring""" from __future__ import annotations def __a ( A , A ): '''simple docstring''' if partitions <= 0: raise ValueError("partitions must be a positive number!" ) if partitions > number_of_bytes: raise ValueError("partitions can not > number_of_bytes!" ) lowercase__ = number_of_bytes // partitions lowercase__ = [] for i in range(A ): lowercase__ = i * bytes_per_partition + 1 lowercase__ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
668
1
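A worked example of the byte-partitioning rule above, assuming the (number_of_bytes, partitions) argument order reconstructed there (the numbers are my own). With 100 bytes and 4 partitions, bytes_per_partition = 100 // 4 = 25, and the final partition absorbs any remainder:

# calling the helper (left as `__a` in the row above):
assert __a(100, 4) == ["1-25", "26-50", "51-75", "76-100"]
# with a remainder: 103 // 4 == 25, so the last slice stretches to 103
assert __a(103, 4) == ["1-25", "26-50", "51-75", "76-103"]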
"""simple docstring""" from __future__ import annotations import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def __a ( A ): '''simple docstring''' if not isinstance(A , A ): raise ValueError("n must be an integer" ) if n <= 0: raise ValueError("n must be >= 0" ) lowercase__ = [] for num in range(len(A ) ): lowercase__ = 0 while 2 * i * i <= odd_composites[num]: lowercase__ = odd_composites[num] - 2 * i * i if is_prime(A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(A ) == n: return list_nums return [] def __a ( ): '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
668
"""simple docstring""" from collections import deque class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = process_name # process name lowercase__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time lowercase__ = arrival_time lowercase__ = burst_time # remaining burst time lowercase__ = 0 # total time of the process wait in ready queue lowercase__ = 0 # time from arrival time to completion time class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ): '''simple docstring''' lowercase__ = number_of_queues # time slice of queues that round robin algorithm applied lowercase__ = time_slices # unfinished process is in this ready_queue lowercase__ = queue # current time lowercase__ = current_time # finished process is in this sequence queue lowercase__ = deque() def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return [q.burst_time for q in queue] def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of finished process while len(_UpperCAmelCase ) != 0: lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 lowercase__ = 0 # set the process's turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # set the completion time lowercase__ = self.current_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_UpperCAmelCase ) ): lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > 
time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time lowercase__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished lowercase__ = 0 # set the finish time lowercase__ = self.current_time # update the process' turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def snake_case__ ( self ): '''simple docstring''' for i in range(self.number_of_queues - 1 ): lowercase__ , lowercase__ = self.round_robin( self.ready_queue, self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3) lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7) lowerCAmelCase_: str = Process("P3", 0, 6_8) lowerCAmelCase_: int = Process("P4", 0, 2_4) lowerCAmelCase_: Dict = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])}) lowerCAmelCase_: Any = Process("P1", 0, 5_3) lowerCAmelCase_: Tuple = Process("P2", 0, 1_7) lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8) lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4) lowerCAmelCase_: Union[str, Any] = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa]) lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0) lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( F'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( F'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
668
1
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = ["audio_values", "audio_mask"] def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = spectrogram_length lowercase__ = num_channels lowercase__ = patch_size lowercase__ = feature_size // self.patch_size[1] lowercase__ = n_fft lowercase__ = sampling_rate // hop_length_to_sampling_rate lowercase__ = sampling_rate lowercase__ = padding_value lowercase__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = spectrogram( _UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, ) lowercase__ = log_spec[:, :-1] lowercase__ = log_spec - 20.0 lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ = is_batched_numpy or ( isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ): lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa ) elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowercase__ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], _UpperCAmelCase ): lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowercase__ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowercase__ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa ) # convert into correct format for padding lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowercase__ = padded_audio_features * self.padding_value for i in range(len(_UpperCAmelCase ) ): lowercase__ = audio_features[i] lowercase__ = feature # return as BatchFeature if return_attention_mask: lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: lowercase__ = {"audio_values": padded_audio_features} lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase ) return encoded_inputs
668
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase_: Dict = "pt" elif is_tf_available(): lowerCAmelCase_: Dict = "tf" else: lowerCAmelCase_: str = "jax" class a__ ( _a , unittest.TestCase ): snake_case_ = ByTaTokenizer snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() lowercase__ = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case__ ( self ): '''simple docstring''' return ByTaTokenizer.from_pretrained("google/byt5-small" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): try: lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) ) lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) ) if max_length is not None and len(_UpperCAmelCase ) > max_length: lowercase__ = toks[:max_length] if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0: while len(_UpperCAmelCase ) < min_length: lowercase__ = toks + toks # toks_str = [t[1] for t in toks] lowercase__ = [t[0] for t in toks] # Ensure consistency lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase ) if " " not in output_txt and len(_UpperCAmelCase ) > 1: lowercase__ = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase ) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase ) ) if with_prefix_space: lowercase__ = " " + output_txt lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) return output_txt, output_ids def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] ) lowercase__ = tokenizer(["hi", "I went to the gym", ""] ) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = "Unicode €." 
lowercase__ = tokenizer(_UpperCAmelCase ) lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" ) lowercase__ = tokenizer("e è é ê ë" ) lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) if FRAMEWORK != "jax": lowercase__ = list(batch.input_ids.numpy()[0] ) else: lowercase__ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", _UpperCAmelCase ) self.assertIn("attention_mask", _UpperCAmelCase ) self.assertNotIn("decoder_input_ids", _UpperCAmelCase ) self.assertNotIn("decoder_attention_mask", _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = [ "Summary of the text.", "Another summary.", ] lowercase__ = tokenizer( text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertEqual(32, targets["input_ids"].shape[1] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization. </s>"] lowercase__ = ["Summary of the text. 
</s>"] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] ) self.assertEqual(_UpperCAmelCase, batch["labels"][0] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) shutil.rmtree(_UpperCAmelCase ) lowercase__ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) lowercase__ = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )] lowercase__ = added_tokens_extra_ids + [ "an_additional_special_token" ] lowercase__ = added_tokens_extra_ids + [ 
"an_additional_special_token" ] with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )] lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase ) self.assertTrue(tokenizer.decode([255] ) == "" ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] lowercase__ = 0 lowercase__ = tokenizer.convert_ids_to_tokens( _UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) for attr in attributes_list: setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) 
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
668
1
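The expected id lists in the ByT5 tests above follow directly from the tokenizer's byte-level scheme: every UTF-8 byte maps to byte value + 3 (ids 0, 1, 2 are reserved for pad, eos, unk), and 1 is appended as </s>. A standalone sketch with no transformers dependency:

text = "Unicode €."
ids = [b + 3 for b in text.encode("utf-8")] + [1]  # +3 offset, then </s> = 1
assert ids == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
# Decoding inverts the offset and strips the trailing </s>.
decoded = bytes(i - 3 for i in ids[:-1]).decode("utf-8")
assert decoded == text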
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class a__ ( _a ): snake_case_ = ["image_processor", "tokenizer"] snake_case_ = "OwlViTImageProcessor" snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self, _UpperCAmelCase=None, _UpperCAmelCase=None, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", _UpperCAmelCase, ) lowercase__ = kwargs.pop("feature_extractor" ) lowercase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_UpperCAmelCase, _UpperCAmelCase ) def __call__( self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase="max_length", _UpperCAmelCase="np", **_UpperCAmelCase ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(_UpperCAmelCase, _UpperCAmelCase ) or (isinstance(_UpperCAmelCase, _UpperCAmelCase ) and not isinstance(text[0], _UpperCAmelCase )): lowercase__ = [self.tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase )] elif isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(text[0], _UpperCAmelCase ): lowercase__ = [] # Maximum number of queries across batch lowercase__ = max([len(_UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(_UpperCAmelCase ) != max_num_queries: lowercase__ = t + [" "] * (max_num_queries - len(_UpperCAmelCase )) lowercase__ = self.tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase ) encodings.append(_UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": lowercase__ = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0 ) lowercase__ = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowercase__ = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0 ) lowercase__ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowercase__ = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0 ) lowercase__ = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf lowercase__ = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0 ) lowercase__ = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) lowercase__ = BatchEncoding() lowercase__ = input_ids lowercase__ = attention_mask if query_images is not None: lowercase__ = BatchEncoding() 
lowercase__ = self.image_processor( _UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase ).pixel_values lowercase__ = query_pixel_values if images is not None: lowercase__ = self.image_processor(_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase ) if text is not None and images is not None: lowercase__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowercase__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_UpperCAmelCase ), tensor_type=_UpperCAmelCase ) def snake_case__ ( self, *_UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process(*_UpperCAmelCase, **_UpperCAmelCase ) def snake_case__ ( self, *_UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process_object_detection(*_UpperCAmelCase, **_UpperCAmelCase ) def snake_case__ ( self, *_UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase, **_UpperCAmelCase ) def snake_case__ ( self, *_UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase ) def snake_case__ ( self, *_UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase ) @property def snake_case__ ( self ): '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, ) return self.image_processor_class @property def snake_case__ ( self ): '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, ) return self.image_processor
668
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class a__ ( unittest.TestCase ): snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 ) lowercase__ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' for example in examples: lowercase__ = video_classifier(_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase, [ {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, ], ) @require_torch def snake_case__ ( self ): '''simple docstring''' lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" lowercase__ = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) lowercase__ = pipeline( "video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 ) lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ) lowercase__ = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [ [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ], ) @require_tf def snake_case__ ( self ): '''simple docstring''' pass
668
1
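The nested-text branch of the OwlViT processor above pads every sample to the batch-wide maximum number of queries with single-space strings before tokenizing, so the tokenized batch is rectangular. A dependency-free sketch of just that step:

def pad_queries(batch):
    # Pad each sample's query list with " " up to the longest sample.
    max_num_queries = max(len(t) for t in batch)
    return [t + [" "] * (max_num_queries - len(t)) for t in batch]

print(pad_queries([["a photo of a cat"], ["a dog", "a bird"]]))
# [['a photo of a cat', ' '], ['a dog', 'a bird']]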
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowerCAmelCase_: str = None lowerCAmelCase_: List[Any] = logging.get_logger(__name__) lowerCAmelCase_: Tuple = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_: int = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", }, "tokenizer_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json", }, } lowerCAmelCase_: int = { "xlnet-base-cased": None, "xlnet-large-cased": None, } lowerCAmelCase_: List[str] = "▁" # Segments (not really needed) lowerCAmelCase_: List[str] = 0 lowerCAmelCase_: List[str] = 1 lowerCAmelCase_: int = 2 lowerCAmelCase_: Union[str, Any] = 3 lowerCAmelCase_: Tuple = 4 class a__ ( _a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = "left" snake_case_ = XLNetTokenizer def __init__( self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=False, _UpperCAmelCase="<s>", _UpperCAmelCase="</s>", _UpperCAmelCase="<unk>", _UpperCAmelCase="<sep>", _UpperCAmelCase="<pad>", _UpperCAmelCase="<cls>", _UpperCAmelCase="<mask>", _UpperCAmelCase=["<eop>", "<eod>"], **_UpperCAmelCase, ): '''simple docstring''' lowercase__ = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token super().__init__( vocab_file=_UpperCAmelCase, tokenizer_file=_UpperCAmelCase, do_lower_case=_UpperCAmelCase, remove_space=_UpperCAmelCase, keep_accents=_UpperCAmelCase, bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = 3 lowercase__ = do_lower_case lowercase__ = remove_space lowercase__ = keep_accents lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." 
) if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase__ = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file, _UpperCAmelCase ) return (out_vocab_file,)
668
"""simple docstring""" import itertools import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __a ( ): '''simple docstring''' lowercase__ = 2 while True: if is_prime(A ): yield num num += 1 def __a ( A = 1_00_01 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , A ) ) if __name__ == "__main__": print(F'{solution() = }')
668
1
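The primality test above relies on the fact that every prime greater than 3 is of the form 6k +/- 1, so trial division only needs candidates 5, 7, 11, 13, ... up to sqrt(n). A self-contained check against a naive definition (the function name is illustrative):

import math

def is_prime_6k(number: int) -> bool:
    # Mirrors the trial division above: handle 2 and 3, then test 6k +/- 1.
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

naive = [n for n in range(2, 100) if all(n % d for d in range(2, n))]
assert [n for n in range(2, 100) if is_prime_6k(n)] == naive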
"""simple docstring""" import argparse lowerCAmelCase_: Optional[int] = "docs/source/_static/js/custom.js" def __a ( A ): '''simple docstring''' with open(A , encoding="utf-8" , newline="\n" ) as f: lowercase__ = f.readlines() lowercase__ = 0 # First let's put the right version while not lines[index].startswith("const stableVersion =" ): index += 1 lowercase__ = f'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith("const versionMapping = {" ): index += 1 # We go until the end while not lines[index].startswith("}" ): index += 1 # We add the new version at the end lines[index - 1] += f''' "v{version}": "v{version}",\n''' with open(A , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(A ) if __name__ == "__main__": lowerCAmelCase_: List[str] = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") lowerCAmelCase_: Dict = parser.parse_args() update_custom_js(args.version)
668
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( _UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths} lowercase__ = Text( cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, ) def snake_case__ ( self ): '''simple docstring''' if self.streaming: lowercase__ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None self.builder.download_and_prepare( download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, ) lowercase__ = self.builder.as_dataset( split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory ) return dataset
668
1
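The line-scanning logic of the version-update script above can be exercised on an in-memory list of lines, with no file I/O; the versions below are hypothetical.

lines = [
    'const stableVersion = "v4.20.0"\n',
    'const versionMapping = {\n',
    '    "v4.20.0": "v4.20.0",\n',
    "}\n",
]
version = "4.21.0"
index = 0
while not lines[index].startswith("const stableVersion ="):
    index += 1
lines[index] = f'const stableVersion = "v{version}"\n'
while not lines[index].startswith("const versionMapping = {"):
    index += 1
while not lines[index].startswith("}"):
    index += 1
lines[index - 1] += f'    "v{version}": "v{version}",\n'  # insert just before "}"
assert '"v4.21.0": "v4.21.0"' in lines[index - 1]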
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def __a ( ): '''simple docstring''' lowercase__ = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=A , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=A , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=A ) return parser.parse_args() def __a ( ): '''simple docstring''' lowercase__ = parse_args() # Import training_script as a module. lowercase__ = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowercase__ = script_fpath.stem lowercase__ = importlib.import_module(A ) # Patch sys.argv lowercase__ = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
668
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCAmelCase_: List[str] = 1_6 lowerCAmelCase_: Optional[Any] = 3_2 def __a ( A , A = 16 , A = "bert-base-cased" ): '''simple docstring''' lowercase__ = AutoTokenizer.from_pretrained(A ) lowercase__ = load_dataset("glue" , "mrpc" ) def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ = datasets.map( A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(A , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. lowercase__ = DataLoader( tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A ) lowercase__ = DataLoader( tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A ) return train_dataloader, eval_dataloader def __a ( A , A ): '''simple docstring''' lowercase__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ = config["lr"] lowercase__ = int(config["num_epochs"] ) lowercase__ = int(config["seed"] ) lowercase__ = int(config["batch_size"] ) lowercase__ = args.model_name_or_path set_seed(A ) lowercase__ , lowercase__ = get_dataloaders(A , A , A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A ) # Instantiate optimizer lowercase__ = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ = optimizer_cls(params=model.parameters() , lr=A ) if accelerator.state.deepspeed_plugin is not None: lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: lowercase__ = 1 lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ = get_linear_schedule_with_warmup( optimizer=A , num_warmup_steps=0 , num_training_steps=A , ) else: lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare( A , A , A , A , A ) # We need to keep track of how many total steps we have iterated over lowercase__ = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ = 0 # Now we train the model lowercase__ = evaluate.load("glue" , "mrpc" ) lowercase__ = 0 lowercase__ = {} for epoch in range(A , A ): model.train() for step, batch in enumerate(A ): lowercase__ = model(**A ) lowercase__ = outputs.loss lowercase__ = loss / gradient_accumulation_steps accelerator.backward(A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowercase__ = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ = model(**A ) lowercase__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowercase__ , lowercase__ = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A ) - 1: lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A , references=A , ) lowercase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , A ) lowercase__ = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: lowercase__ = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(A , A ) def __a ( ): '''simple docstring''' lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , ) parser.add_argument( "--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=A , default=3 , help="Number of train epochs." , ) lowercase__ = parser.parse_args() lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(A , A ) if __name__ == "__main__": main()
668
1
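The gradient-accumulation pattern in the training loop above, reduced to a minimal runnable form: scale each micro-batch loss by the accumulation factor, and only step the optimizer on accumulation boundaries. A toy linear model stands in for the transformer; the step condition mirrors the one used above.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
gradient_accumulation_steps = 4

for step in range(8):
    loss = model(torch.randn(2, 4)).pow(2).mean()
    (loss / gradient_accumulation_steps).backward()  # gradients accumulate across calls
    if step % gradient_accumulation_steps == 0:  # same condition as the loop above
        optimizer.step()
        optimizer.zero_grad()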
"""simple docstring""" import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def __a ( A , A , A , A , A = None , A = None , A = None , ): '''simple docstring''' if config_name_or_path is None: lowercase__ = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: lowercase__ = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowercase__ = question_encoder_name_or_path lowercase__ = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. lowercase__ = RagConfig.from_pretrained(A ) lowercase__ = AutoConfig.from_pretrained(A ) lowercase__ = AutoConfig.from_pretrained(A ) lowercase__ = gen_config lowercase__ = question_encoder_config lowercase__ = model_class.from_pretrained_question_encoder_generator( A , A , config=A ) rag_model.save_pretrained(A ) # Sanity check. model_class.from_pretrained(A ) # Save tokenizers. lowercase__ = AutoTokenizer.from_pretrained(A ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) lowercase__ = AutoTokenizer.from_pretrained(A ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": lowerCAmelCase_: int = argparse.ArgumentParser() parser.add_argument( "--model_type", choices=["rag_sequence", "rag_token"], required=True, type=str, help="RAG model type: rag_sequence, rag_token", ) parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.") parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier") parser.add_argument( "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier" ) parser.add_argument( "--generator_tokenizer_name_or_path", type=str, help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``", ) parser.add_argument( "--question_encoder_tokenizer_name_or_path", type=str, help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``", ) parser.add_argument( "--config_name_or_path", type=str, help=( "Identifier of the model config to use, if not provided, resolves to a base config for a given" " ``model_type``" ), ) lowerCAmelCase_: Any = parser.parse_args() lowerCAmelCase_: Optional[Any] = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
668
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _a ): snake_case_ = (IPNDMScheduler,) snake_case_ = (("num_inference_steps", 50),) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = {"num_train_timesteps": 1000} config.update(**_UpperCAmelCase ) return config def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase 
).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = 10 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample return sample def snake_case__ ( self ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ): lowercase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.timesteps[5] lowercase__ = scheduler.timesteps[6] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def snake_case__ ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 254_0529 ) < 10
668
1
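The save/reload round trip that the scheduler tests above exercise repeatedly, reduced to its core. A sketch assuming diffusers is installed; the config value is illustrative.

import tempfile

from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)  # writes scheduler_config.json
    reloaded = IPNDMScheduler.from_pretrained(tmpdirname)
assert reloaded.config.num_train_timesteps == scheduler.config.num_train_timesteps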
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_: List[str] = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Tuple = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Union[str, Any] = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys lowerCAmelCase_: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
668
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( _a , unittest.TestCase ): snake_case_ = MgpstrTokenizer snake_case_ = False snake_case_ = {} snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() # fmt: off lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = "tester" lowercase__ = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ), 1 ) lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase ) lowercase__ = tokenizer.tokenize(_UpperCAmelCase ) lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ), 0 ) lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def snake_case__ ( self ): '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def snake_case__ ( self ): '''simple docstring''' pass
668
1
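The character-level vocabulary built in the MGP-STR tokenizer test above supports a trivial encode/decode round trip; a dependency-free sketch of the same mapping:

tokens = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
vocab = {tok: i for i, tok in enumerate(tokens)}
inv_vocab = {i: tok for tok, i in vocab.items()}

ids = [vocab[ch] for ch in "tester"]
assert "".join(inv_vocab[i] for i in ids) == "tester"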
"""simple docstring""" def __a ( A ): '''simple docstring''' lowercase__ = len(A ) for _ in range(A ): for i in range(_ % 2 , arr_size - 1 , 2 ): if arr[i + 1] < arr[i]: lowercase__ , lowercase__ = arr[i + 1], arr[i] return arr if __name__ == "__main__": lowerCAmelCase_: str = list(range(1_0, 0, -1)) print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
668
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
668
1
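A self-contained property check of the odd-even (brick) sort above: n alternating passes over even- and odd-indexed adjacent pairs are enough to sort n elements.

import random

def odd_even_sort(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        # Even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

for _ in range(100):
    data = [random.randint(0, 50) for _ in range(random.randint(0, 12))]
    assert odd_even_sort(data) == sorted(data)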
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging lowerCAmelCase_: List[str] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = ["input_features", "attention_mask"] def __init__( self, _UpperCAmelCase=80, _UpperCAmelCase=1_6000, _UpperCAmelCase=0.0, _UpperCAmelCase=10, _UpperCAmelCase=25, _UpperCAmelCase="hamming_window", _UpperCAmelCase=32_768.0, _UpperCAmelCase=0.97, _UpperCAmelCase=1.0, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=False, **_UpperCAmelCase, ): '''simple docstring''' super().__init__(feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase ) lowercase__ = feature_size lowercase__ = sampling_rate lowercase__ = padding_value lowercase__ = hop_length lowercase__ = win_length lowercase__ = frame_signal_scale lowercase__ = preemphasis_coeff lowercase__ = mel_floor lowercase__ = normalize_means lowercase__ = normalize_vars lowercase__ = win_function lowercase__ = return_attention_mask lowercase__ = win_length * sampling_rate // 1000 lowercase__ = hop_length * sampling_rate // 1000 lowercase__ = optimal_fft_length(self.sample_size ) lowercase__ = (self.n_fft // 2) + 1 def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if self.win_function == "hamming_window": lowercase__ = window_function(window_length=self.sample_size, name=self.win_function, periodic=_UpperCAmelCase ) else: lowercase__ = window_function(window_length=self.sample_size, name=self.win_function ) lowercase__ = mel_filter_bank( num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, ) lowercase__ = spectrogram( one_waveform * self.frame_signal_scale, window=_UpperCAmelCase, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=_UpperCAmelCase, preemphasis=self.preemphasis_coeff, mel_filters=_UpperCAmelCase, mel_floor=self.mel_floor, log_mel="log", ) return msfc_features.T def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' if self.normalize_means: lowercase__ = x[:input_length].mean(axis=0 ) lowercase__ = np.subtract(_UpperCAmelCase, _UpperCAmelCase ) if self.normalize_vars: lowercase__ = x[:input_length].std(axis=0 ) lowercase__ = np.divide(_UpperCAmelCase, _UpperCAmelCase ) if input_length < x.shape[0]: lowercase__ = padding_value # make sure array is in float32 lowercase__ = x.astype(np.floataa ) return x def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(_UpperCAmelCase, _UpperCAmelCase, self.padding_value ) for x, n in zip(_UpperCAmelCase, _UpperCAmelCase )] def __call__( self, _UpperCAmelCase, _UpperCAmelCase = False, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding 
to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ = is_batched_numpy or ( isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ): lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa ) elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [raw_speech] # extract fbank features lowercase__ = [self._extract_mfsc_features(_UpperCAmelCase ) for one_waveform in raw_speech] # convert into correct format for padding lowercase__ = BatchFeature({"input_features": features} ) lowercase__ = self.pad( _UpperCAmelCase, padding=_UpperCAmelCase, max_length=_UpperCAmelCase, truncation=_UpperCAmelCase, pad_to_multiple_of=_UpperCAmelCase, return_attention_mask=_UpperCAmelCase, **_UpperCAmelCase, ) # make sure list is in array format lowercase__ = padded_inputs.get("input_features" ) if isinstance(input_features[0], _UpperCAmelCase ): lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in input_features] lowercase__ = padded_inputs.get("attention_mask" ) if attention_mask is not None: lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: lowercase__ = ( np.array(_UpperCAmelCase, dtype=np.intaa ) if self._get_padding_strategies(_UpperCAmelCase, max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) lowercase__ = self.normalize( padded_inputs["input_features"], attention_mask=_UpperCAmelCase ) if return_tensors is not None: lowercase__ = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs
668
"""simple docstring""" from typing import Any import numpy as np def __a ( A ): '''simple docstring''' return np.array_equal(A , matrix.conjugate().T ) def __a ( A , A ): '''simple docstring''' lowercase__ = v.conjugate().T lowercase__ = v_star.dot(A ) assert isinstance(A , np.ndarray ) return (v_star_dot.dot(A )) / (v_star.dot(A )) def __a ( ): '''simple docstring''' lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) lowercase__ = np.array([[1], [2], [3]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' print(rayleigh_quotient(A , A ) ) lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' assert rayleigh_quotient(A , A ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
668
1
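For the Hermitian matrix used in the Rayleigh-quotient test above, the quotient is guaranteed to be real and to lie between the extreme eigenvalues; a quick numpy verification of both properties:

import numpy as np

A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])

assert np.array_equal(A, A.conj().T)  # Hermitian
r = ((v.conj().T @ A @ v) / (v.conj().T @ v)).item()
eigs = np.linalg.eigvalsh(A)
assert abs(r.imag) < 1e-12
assert eigs.min() - 1e-9 <= r.real <= eigs.max() + 1e-9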
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_: int = { "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Any = ["AlbertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Optional[Any] = ["AlbertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: str = [ "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "AlbertForMaskedLM", "AlbertForMultipleChoice", "AlbertForPreTraining", "AlbertForQuestionAnswering", "AlbertForSequenceClassification", "AlbertForTokenClassification", "AlbertModel", "AlbertPreTrainedModel", "load_tf_weights_in_albert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: str = [ "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", "TFAlbertForQuestionAnswering", "TFAlbertForSequenceClassification", "TFAlbertForTokenClassification", "TFAlbertMainLayer", "TFAlbertModel", "TFAlbertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: List[Any] = [ "FlaxAlbertForMaskedLM", "FlaxAlbertForMultipleChoice", "FlaxAlbertForPreTraining", "FlaxAlbertForQuestionAnswering", "FlaxAlbertForSequenceClassification", "FlaxAlbertForTokenClassification", "FlaxAlbertModel", "FlaxAlbertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, 
FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys lowerCAmelCase_: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
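A minimal sketch of the deferred-import idea behind `_LazyModule` (PEP 562 module-level `__getattr__`), not the actual implementation; it would live in a package `__init__.py`:

import importlib

_import_structure = {"configuration_albert": ["AlbertConfig"]}


def __getattr__(name):
    # import the submodule only when one of its symbols is first accessed
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module("." + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")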
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class a__ ( _a , unittest.TestCase ): snake_case_ = PriorTransformer snake_case_ = "hidden_states" @property def snake_case__ ( self ): '''simple docstring''' lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) def snake_case__ ( self ): '''simple docstring''' lowercase__ = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } lowercase__ = self.dummy_input return init_dict, inputs_dict def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ), 0 ) model.to(_UpperCAmelCase ) lowercase__ = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common() lowercase__ = self.model_class(**_UpperCAmelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ) lowercase__ = model.to(_UpperCAmelCase ) if hasattr(_UpperCAmelCase, "set_default_attn_processor" ): model.set_default_attn_processor() lowercase__ = self.get_dummy_seed_input() with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] lowercase__ = output[0, :5].flatten().cpu() print(_UpperCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) ) @slow class a__ ( unittest.TestCase ): def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = batch_size lowercase__ = embedding_dim lowercase__ = num_embeddings lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" ) model.to(_UpperCAmelCase ) lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase ) with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] assert list(sample.shape ) == [1, 768] lowercase__ = sample[0, :8].flatten().cpu() print(_UpperCAmelCase ) lowercase__ = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
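The `get_dummy_seed_input` helpers above reseed the generator before every draw; a minimal illustration of why that makes the expected output slices reproducible:

import torch

torch.manual_seed(0)
a = torch.randn(3)
torch.manual_seed(0)
b = torch.randn(3)
assert torch.allclose(a, b)  # same seed on the same device, same draw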
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean lowerCAmelCase_: List[str] = 0 lowerCAmelCase_: Any = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCAmelCase_: Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right lowerCAmelCase_: int = tuple[int, int] class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ): '''simple docstring''' lowercase__ = pos_x lowercase__ = pos_y lowercase__ = (pos_y, pos_x) lowercase__ = goal_x lowercase__ = goal_y lowercase__ = g_cost lowercase__ = parent lowercase__ = self.calculate_heuristic() lowercase__ = self.g_cost + self.h_cost def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.pos_x - self.goal_x lowercase__ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(_UpperCAmelCase ) + abs(_UpperCAmelCase ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self, _UpperCAmelCase ): '''simple docstring''' return self.f_cost < other.f_cost class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = Node(start[1], start[0], goal[1], goal[0], 0, _UpperCAmelCase ) lowercase__ = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, _UpperCAmelCase ) lowercase__ = [self.start] lowercase__ = [] lowercase__ = False def snake_case__ ( self ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowercase__ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(_UpperCAmelCase ) self.closed_nodes.append(_UpperCAmelCase ) lowercase__ = self.get_successors(_UpperCAmelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_UpperCAmelCase ) else: # retrieve the best current path lowercase__ = self.open_nodes.pop(self.open_nodes.index(_UpperCAmelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_UpperCAmelCase ) else: self.open_nodes.append(_UpperCAmelCase ) return [self.start.pos] def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for action in delta: lowercase__ = parent.pos_x + action[1] lowercase__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_UpperCAmelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _UpperCAmelCase, _UpperCAmelCase, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, _UpperCAmelCase, ) ) return successors def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = node lowercase__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowercase__ = current_node.parent path.reverse() return path class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = AStar(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = AStar(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = False def snake_case__ ( self ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() lowercase__ = self.fwd_astar.open_nodes.pop(0 ) lowercase__ = self.bwd_astar.open_nodes.pop(0 ) if 
current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( _UpperCAmelCase, _UpperCAmelCase ) self.fwd_astar.closed_nodes.append(_UpperCAmelCase ) self.bwd_astar.closed_nodes.append(_UpperCAmelCase ) lowercase__ = current_bwd_node lowercase__ = current_fwd_node lowercase__ = { self.fwd_astar: self.fwd_astar.get_successors(_UpperCAmelCase ), self.bwd_astar: self.bwd_astar.get_successors(_UpperCAmelCase ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(_UpperCAmelCase ) else: # retrieve the best current path lowercase__ = astar.open_nodes.pop( astar.open_nodes.index(_UpperCAmelCase ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(_UpperCAmelCase ) else: astar.open_nodes.append(_UpperCAmelCase ) return [self.fwd_astar.start.pos] def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = self.fwd_astar.retrace_path(_UpperCAmelCase ) lowercase__ = self.bwd_astar.retrace_path(_UpperCAmelCase ) bwd_path.pop() bwd_path.reverse() lowercase__ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] lowerCAmelCase_: Optional[int] = (0, 0) lowerCAmelCase_: Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCAmelCase_: List[str] = time.time() lowerCAmelCase_: Union[str, Any] = AStar(init, goal) lowerCAmelCase_: str = a_star.search() lowerCAmelCase_: List[Any] = time.time() - start_time print(F'AStar execution time = {end_time:f} seconds') lowerCAmelCase_: Any = time.time() lowerCAmelCase_: Dict = BidirectionalAStar(init, goal) lowerCAmelCase_: Tuple = time.time() - bd_start_time print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
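A compact, self-contained A* over the same kind of 0/1 grid, using a heap instead of re-sorting the open list on every step (a sketch with the Manhattan heuristic, not the classes above):

import heapq


def astar(grid, start, goal):
    rows, cols = len(grid), len(grid[0])
    open_heap = [(0, start, [start])]  # (f_cost, (y, x), path so far)
    g_cost = {start: 0}
    while open_heap:
        _, (y, x), path = heapq.heappop(open_heap)
        if (y, x) == goal:
            return path
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            ny, nx = y + dy, x + dx
            if 0 <= ny < rows and 0 <= nx < cols and grid[ny][nx] == 0:
                new_g = g_cost[(y, x)] + 1
                if new_g < g_cost.get((ny, nx), float("inf")):
                    g_cost[(ny, nx)] = new_g
                    h = abs(goal[0] - ny) + abs(goal[1] - nx)
                    heapq.heappush(open_heap, (new_g + h, (ny, nx), path + [(ny, nx)]))
    return [start]


print(astar([[0, 0], [1, 0]], (0, 0), (1, 1)))  # [(0, 0), (0, 1), (1, 1)]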
"""simple docstring""" lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def __a ( A ): '''simple docstring''' if not isinstance(A , A ): lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ = len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ = b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ = b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def __a ( A ): '''simple docstring''' if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ = ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ = encoded_data[:-padding] lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_: List[Any] = logging.get_logger(__name__) lowerCAmelCase_: Optional[Any] = { "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json", # See all CANINE models at https://huggingface.co/models?filter=canine } class a__ ( _a ): snake_case_ = "canine" def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=1_6384, _UpperCAmelCase=16, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0Xe000, _UpperCAmelCase=0Xe001, _UpperCAmelCase=4, _UpperCAmelCase=4, _UpperCAmelCase=8, _UpperCAmelCase=1_6384, _UpperCAmelCase=128, **_UpperCAmelCase, ): '''simple docstring''' super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase ) lowercase__ = max_position_embeddings lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = type_vocab_size lowercase__ = layer_norm_eps # Character config: lowercase__ = downsampling_rate lowercase__ = upsampling_kernel_size lowercase__ = num_hash_functions lowercase__ = num_hash_buckets lowercase__ = local_transformer_stride
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ): '''simple docstring''' lowercase__ = symbols(A ) lowercase__ = lambdify(A , A ) lowercase__ = lambdify(A , diff(A , A ) ) lowercase__ = starting_point while True: if diff_function(A ) != 0: lowercase__ = prev_guess - multiplicity * func(A ) / diff_function( A ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase__ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial # Find fourth Root of 5 print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}') # Find value of e print( "The root of log(y) - 1 = 0 is ", F'{newton_raphson("log(y) - 1", 2, variable="y")}', ) # Exponential Roots print( "The root of exp(x) - 1 = 0 is", F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}', ) # Find root of cos(x) print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
"""simple docstring""" import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCAmelCase_: int = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase_: int = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCAmelCase_: List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowerCAmelCase_: Optional[int] = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)") lowerCAmelCase_: Dict = { "DecisionTransformerConfig", "EncoderDecoderConfig", "MusicgenConfig", "RagConfig", "SpeechEncoderDecoderConfig", "TimmBackboneConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig", "LlamaConfig", } def __a ( A ): '''simple docstring''' lowercase__ = None # source code of `config_class` lowercase__ = inspect.getsource(A ) lowercase__ = _re_checkpoint.findall(A ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): lowercase__ = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link lowercase__ = f'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: lowercase__ = ckpt_name break return checkpoint def __a ( ): '''simple docstring''' lowercase__ = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue lowercase__ = get_checkpoint_from_config_class(A ) lowercase__ = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(A ) if len(A ) > 0: lowercase__ = "\n".join(sorted(A ) ) raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_: Union[str, Any] = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Any = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Tuple = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Optional[Any] = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase_: Optional[int] = 
_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" lowerCAmelCase_: Union[str, Any] = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = ["audio_values", "audio_mask"] def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = spectrogram_length lowercase__ = num_channels lowercase__ = patch_size lowercase__ = feature_size // self.patch_size[1] lowercase__ = n_fft lowercase__ = sampling_rate // hop_length_to_sampling_rate lowercase__ = sampling_rate lowercase__ = padding_value lowercase__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = spectrogram( _UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, ) lowercase__ = log_spec[:, :-1] lowercase__ = log_spec - 20.0 lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ = is_batched_numpy or ( isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ): lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa ) elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowercase__ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], _UpperCAmelCase ): lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowercase__ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowercase__ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa ) # convert into correct format for padding lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowercase__ = padded_audio_features * self.padding_value for i in range(len(_UpperCAmelCase ) ): lowercase__ = audio_features[i] lowercase__ = feature # return as BatchFeature if return_attention_mask: lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: lowercase__ = {"audio_values": padded_audio_features} lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase ) return encoded_inputs
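The dB normalization inside the fbank extraction above can be checked in isolation: shift by 20 dB, scale into [-2, 0] with a clip, then lift by 1, so inputs of 20 dB and above map to 1.0 and -60 dB and below map to -1.0. The input values below are made up:

import numpy as np

log_spec = np.array([-80.0, -40.0, 0.0])  # hypothetical dB magnitudes
normalized = np.clip((log_spec - 20.0) / 40.0, -2.0, 0.0) + 1.0
print(normalized)  # [-1.  -0.5  0.5]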
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase_: Optional[int] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = ["input_features", "is_longer"] def __init__( self, _UpperCAmelCase=64, _UpperCAmelCase=4_8000, _UpperCAmelCase=480, _UpperCAmelCase=10, _UpperCAmelCase=1024, _UpperCAmelCase=0.0, _UpperCAmelCase=False, _UpperCAmelCase = 0, _UpperCAmelCase = 1_4000, _UpperCAmelCase = None, _UpperCAmelCase = "fusion", _UpperCAmelCase = "repeatpad", **_UpperCAmelCase, ): '''simple docstring''' super().__init__( feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, return_attention_mask=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = top_db lowercase__ = truncation lowercase__ = padding lowercase__ = fft_window_size lowercase__ = (fft_window_size >> 1) + 1 lowercase__ = hop_length lowercase__ = max_length_s lowercase__ = max_length_s * sampling_rate lowercase__ = sampling_rate lowercase__ = frequency_min lowercase__ = frequency_max lowercase__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins, num_mel_filters=_UpperCAmelCase, min_frequency=_UpperCAmelCase, max_frequency=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, norm=_UpperCAmelCase, mel_scale="htk", ) lowercase__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins, num_mel_filters=_UpperCAmelCase, min_frequency=_UpperCAmelCase, max_frequency=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = copy.deepcopy(self.__dict__ ) lowercase__ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = spectrogram( _UpperCAmelCase, window_function(self.fft_window_size, "hann" ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=_UpperCAmelCase, log_mel="dB", ) return log_mel_spectrogram.T def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowercase__ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowercase__ = [0] # randomly choose index for each part lowercase__ = np.random.choice(ranges[0] ) lowercase__ = np.random.choice(ranges[1] ) lowercase__ = np.random.choice(ranges[2] ) lowercase__ = mel[idx_front : idx_front + chunk_frames, :] lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :] lowercase__ = mel[idx_back : idx_back + chunk_frames, :] lowercase__ = torch.tensor(mel[None, None, :] ) lowercase__ = torch.nn.functional.interpolate( _UpperCAmelCase, size=[chunk_frames, 64], mode="bilinear", align_corners=_UpperCAmelCase ) lowercase__ = mel_shrink[0][0].numpy() lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 ) return mel_fusion def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, 
_UpperCAmelCase ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowercase__ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowercase__ = len(_UpperCAmelCase ) - max_length lowercase__ = np.random.randint(0, overflow + 1 ) lowercase__ = waveform[idx : idx + max_length] lowercase__ = self._np_extract_fbank_features(_UpperCAmelCase, self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowercase__ = self._np_extract_fbank_features(_UpperCAmelCase, self.mel_filters ) lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowercase__ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowercase__ = np.stack([mel, mel, mel, mel], axis=0 ) lowercase__ = False else: lowercase__ = self._random_mel_fusion(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) lowercase__ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: lowercase__ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowercase__ = int(max_length / len(_UpperCAmelCase ) ) lowercase__ = np.stack(np.tile(_UpperCAmelCase, n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowercase__ = int(max_length / len(_UpperCAmelCase ) ) lowercase__ = np.stack(np.tile(_UpperCAmelCase, _UpperCAmelCase ) ) lowercase__ = np.pad(_UpperCAmelCase, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0 ) if truncation == "fusion": lowercase__ = self._np_extract_fbank_features(_UpperCAmelCase, self.mel_filters ) lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 ) else: lowercase__ = self._np_extract_fbank_features(_UpperCAmelCase, self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' lowercase__ = truncation if truncation is not None else self.truncation lowercase__ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ = is_batched_numpy or ( isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ): lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa ) elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [np.asarray(_UpperCAmelCase )] # convert to mel spectrogram, truncate and pad if needed. lowercase__ = [ self._get_input_mel(_UpperCAmelCase, max_length if max_length else self.nb_max_samples, _UpperCAmelCase, _UpperCAmelCase ) for waveform in raw_speech ] lowercase__ = [] lowercase__ = [] for mel, longer in padded_inputs: input_mel.append(_UpperCAmelCase ) is_longer.append(_UpperCAmelCase ) if truncation == "fusion" and sum(_UpperCAmelCase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowercase__ = np.random.randint(0, len(_UpperCAmelCase ) ) lowercase__ = True if isinstance(input_mel[0], _UpperCAmelCase ): lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowercase__ = [[longer] for longer in is_longer] lowercase__ = {"input_features": input_mel, "is_longer": is_longer} lowercase__ = BatchFeature(_UpperCAmelCase ) if return_tensors is not None: lowercase__ = input_features.convert_to_tensors(_UpperCAmelCase ) return input_features
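The "repeatpad" branch above, isolated with toy numbers: tile the short clip a whole number of times, then zero-pad the remainder up to `max_length`:

import numpy as np

waveform = np.array([1.0, 2.0, 3.0])
max_length = 8
n_repeat = int(max_length / len(waveform))  # 2
tiled = np.tile(waveform, n_repeat)         # [1. 2. 3. 1. 2. 3.]
padded = np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)
print(padded)  # [1. 2. 3. 1. 2. 3. 0. 0.]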
"""simple docstring""" from __future__ import annotations import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def __a ( A ): '''simple docstring''' if not isinstance(A , A ): raise ValueError("n must be an integer" ) if n <= 0: raise ValueError("n must be >= 0" ) lowercase__ = [] for num in range(len(A ) ): lowercase__ = 0 while 2 * i * i <= odd_composites[num]: lowercase__ = odd_composites[num] - 2 * i * i if is_prime(A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(A ) == n: return list_nums return [] def __a ( ): '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" from __future__ import annotations def __a ( A , A ): '''simple docstring''' lowercase__ = [] lowercase__ = [] lowercase__ = 0 lowercase__ = sum(A ) create_state_space_tree(A , A , A , A , A , A ) return result def __a ( A , A , A , A , A , A , ): '''simple docstring''' if sum(A ) > max_sum or (remaining_nums_sum + sum(A )) < max_sum: return if sum(A ) == max_sum: result.append(A ) return for index in range(A , len(A ) ): create_state_space_tree( A , A , index + 1 , [*path, nums[index]] , A , remaining_nums_sum - nums[index] , ) lowerCAmelCase_: List[Any] = [3, 3_4, 4, 1_2, 5, 2] lowerCAmelCase_: str = 9 lowerCAmelCase_: Union[str, Any] = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
"""simple docstring""" import os import sys lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) lowerCAmelCase_: Union[str, Any] = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoConfig.from_pretrained(*A , **A ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A , **A ) @add_start_docstrings(AutoModel.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModel.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class a__ ( unittest.TestCase ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=99, _UpperCAmelCase=32, _UpperCAmelCase=5, _UpperCAmelCase=4, _UpperCAmelCase=37, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=16, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=4, ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_choices def snake_case__ ( self ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowercase__ = AlbertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_UpperCAmelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class a__ ( _a , unittest.TestCase ): snake_case_ = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = FlaxAlbertModelTester(self ) @slow def snake_case__ ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ = model_class_name.from_pretrained("albert-base-v2" ) lowercase__ = 
model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase ) @require_flax class a__ ( unittest.TestCase ): @slow def snake_case__ ( self ): '''simple docstring''' lowercase__ = FlaxAlbertModel.from_pretrained("albert-base-v2" ) lowercase__ = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0] lowercase__ = (1, 11, 768) self.assertEqual(output.shape, _UpperCAmelCase ) lowercase__ = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], _UpperCAmelCase, atol=1E-4 ) )
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class a__ ( unittest.TestCase ): @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ): lowercase__ = FlaxAutoModel.from_pretrained("bert-base" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
"""simple docstring""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __a ( A , A , A ): '''simple docstring''' lowercase__ = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") lowercase__ = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(A ): os.makedirs(A ) lowercase__ = model.state_dict() def to_tf_var_name(A ): for patt, repl in iter(A ): lowercase__ = name.replace(A , A ) return f'''bert/{name}''' def create_tf_var(A , A , A ): lowercase__ = tf.dtypes.as_dtype(tensor.dtype ) lowercase__ = tf.get_variable(dtype=A , shape=tensor.shape , name=A , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(A ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: lowercase__ = to_tf_var_name(A ) lowercase__ = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): lowercase__ = torch_tensor.T lowercase__ = create_tf_var(tensor=A , name=A , session=A ) tf.keras.backend.set_value(A , A ) lowercase__ = session.run(A ) print(f'''Successfully created {tf_name}: {np.allclose(A , A )}''' ) lowercase__ = tf.train.Saver(tf.trainable_variables() ) saver.save(A , os.path.join(A , model_name.replace("-" , "_" ) + ".ckpt" ) ) def __a ( A=None ): '''simple docstring''' lowercase__ = argparse.ArgumentParser() parser.add_argument("--model_name" , type=A , required=A , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=A , default=A , required=A , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=A , required=A , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=A , required=A , help="Directory in which to save tensorflow model" ) lowercase__ = parser.parse_args(A ) lowercase__ = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_: str = logging.get_logger(__name__) lowerCAmelCase_: List[Any] = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class a__ ( _a ): snake_case_ = "data2vec-vision" def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ): '''simple docstring''' super().__init__(**_UpperCAmelCase ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = use_mask_token lowercase__ = use_absolute_position_embeddings lowercase__ = use_relative_position_bias lowercase__ = use_shared_relative_position_bias lowercase__ = layer_scale_init_value lowercase__ = drop_path_rate lowercase__ = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__ = out_indices lowercase__ = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__ = use_auxiliary_head lowercase__ = auxiliary_loss_weight lowercase__ = auxiliary_channels lowercase__ = auxiliary_num_convs lowercase__ = auxiliary_concat_input lowercase__ = semantic_loss_ignore_index class a__ ( _a ): snake_case_ = version.parse("1.11" ) @property def snake_case__ ( self ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def snake_case__ ( self ): '''simple docstring''' return 1E-4
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __a ( A , A , A , A , A ): '''simple docstring''' with open(A ) as metadata_file: lowercase__ = json.load(A ) lowercase__ = LukeConfig(use_entity_aware_attention=A , **metadata["model_config"] ) # Load in the weights from the checkpoint_path lowercase__ = torch.load(A , map_location="cpu" )["module"] # Load the entity vocab file lowercase__ = load_original_entity_vocab(A ) # add an entry for [MASK2] lowercase__ = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 lowercase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks lowercase__ = AddedToken("<ent>" , lstrip=A , rstrip=A ) lowercase__ = AddedToken("<ent2>" , lstrip=A , rstrip=A ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(A ) with open(os.path.join(A , "tokenizer_config.json" ) , "r" ) as f: lowercase__ = json.load(A ) lowercase__ = "MLukeTokenizer" with open(os.path.join(A , "tokenizer_config.json" ) , "w" ) as f: json.dump(A , A ) with open(os.path.join(A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(A , A ) lowercase__ = MLukeTokenizer.from_pretrained(A ) # Initialize the embeddings of the special tokens lowercase__ = tokenizer.convert_tokens_to_ids(["@"] )[0] lowercase__ = tokenizer.convert_tokens_to_ids(["#"] )[0] lowercase__ = state_dict["embeddings.word_embeddings.weight"] lowercase__ = word_emb[ent_init_index].unsqueeze(0 ) lowercase__ = word_emb[enta_init_index].unsqueeze(0 ) lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: lowercase__ = state_dict[bias_name] lowercase__ = decoder_bias[ent_init_index].unsqueeze(0 ) lowercase__ = decoder_bias[enta_init_index].unsqueeze(0 ) lowercase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: lowercase__ = f'''encoder.layer.{layer_index}.attention.self.''' lowercase__ = state_dict[prefix + matrix_name] lowercase__ = state_dict[prefix + matrix_name] lowercase__ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks lowercase__ = state_dict["entity_embeddings.entity_embeddings.weight"] lowercase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) lowercase__ = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' lowercase__ = state_dict["entity_predictions.bias"] lowercase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) lowercase__ = torch.cat([entity_prediction_bias, entity_mask_bias] ) lowercase__ = LukeForMaskedLM(config=A ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) lowercase__ = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or 
key.startswith("entity_predictions" )): lowercase__ = state_dict[key] else: lowercase__ = state_dict[key] lowercase__ , lowercase__ = model.load_state_dict(A , strict=A ) if set(A ) != {"luke.embeddings.position_ids"}: raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(A ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs lowercase__ = MLukeTokenizer.from_pretrained(A , task="entity_classification" ) lowercase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." lowercase__ = (0, 9) lowercase__ = tokenizer(A , entity_spans=[span] , return_tensors="pt" ) lowercase__ = model(**A ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base lowercase__ = torch.Size((1, 33, 7_68) ) lowercase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base lowercase__ = torch.Size((1, 1, 7_68) ) lowercase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A , atol=1e-4 ): raise ValueError # Verify masked word/entity prediction lowercase__ = MLukeTokenizer.from_pretrained(A ) lowercase__ = "Tokyo is the capital of <mask>." 
lowercase__ = (24, 30) lowercase__ = tokenizer(A , entity_spans=[span] , return_tensors="pt" ) lowercase__ = model(**A ) lowercase__ = encoding["input_ids"][0].tolist() lowercase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) lowercase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(A ) lowercase__ = outputs.entity_logits[0][0].argmax().item() lowercase__ = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(A ) ) model.save_pretrained(A ) def __a ( A ): '''simple docstring''' lowercase__ = ["[MASK]", "[PAD]", "[UNK]"] lowercase__ = [json.loads(A ) for line in open(A )] lowercase__ = {} for entry in data: lowercase__ = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: lowercase__ = entity_id break lowercase__ = f'''{language}:{entity_name}''' lowercase__ = entity_id return new_mapping if __name__ == "__main__": lowerCAmelCase_: Any = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowerCAmelCase_: List[str] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
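# A minimal invocation sketch for the converter above; the script name and
# every path below are hypothetical placeholders, not files shipped with the
# original checkpoint. The entity vocab is read line-by-line as JSON by
# load_original_entity_vocab, hence the .jsonl placeholder.
#
# python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./mluke/pytorch_model.bin \
#     --metadata_path ./mluke/metadata.json \
#     --entity_vocab_path ./mluke/entity_vocab.jsonl \
#     --pytorch_dump_folder_path ./converted_mluke \
#     --model_size base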
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_: List[Any] = logging.get_logger(__name__) lowerCAmelCase_: int = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class a__ ( _a ): snake_case_ = "markuplm" def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout # additional properties lowercase__ = max_depth lowercase__ = max_xpath_tag_unit_embeddings lowercase__ = max_xpath_subs_unit_embeddings lowercase__ = tag_pad_id lowercase__ = subs_pad_id lowercase__ = xpath_unit_hidden_size
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __a ( A , A ): '''simple docstring''' assert isinstance(A , A ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __a ( A , A , A ): '''simple docstring''' lowercase__ = tmp_path / "cache" lowercase__ = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ = TextDatasetReader(A , cache_dir=A , keep_in_memory=A ).read() _check_text_dataset(A , A ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def __a ( A , A , A ): '''simple docstring''' lowercase__ = tmp_path / "cache" lowercase__ = {"text": "string"} lowercase__ = features.copy() if features else default_expected_features lowercase__ = ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = TextDatasetReader(A , features=A , cache_dir=A ).read() _check_text_dataset(A , A ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __a ( A , A , A ): '''simple docstring''' lowercase__ = tmp_path / "cache" lowercase__ = {"text": "string"} lowercase__ = TextDatasetReader(A , cache_dir=A , split=A ).read() _check_text_dataset(A , A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __a ( A , A , A ): '''simple docstring''' if issubclass(A , A ): lowercase__ = text_path elif issubclass(A , A ): lowercase__ = [text_path] lowercase__ = tmp_path / "cache" lowercase__ = {"text": "string"} lowercase__ = TextDatasetReader(A , cache_dir=A ).read() _check_text_dataset(A , A ) def __a ( A , A , A=("train",) ): '''simple docstring''' assert isinstance(A , A ) for split in splits: lowercase__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __a ( A , A , A ): '''simple docstring''' lowercase__ = tmp_path / "cache" lowercase__ = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ = TextDatasetReader({"train": text_path} , cache_dir=A , keep_in_memory=A ).read() _check_text_datasetdict(A , A ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def __a ( A , A , A ): '''simple docstring''' lowercase__ = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowercase__ = {"text": "string"} lowercase__ = features.copy() if features else default_expected_features lowercase__ = ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = TextDatasetReader({"train": text_path} , features=A , cache_dir=A ).read() _check_text_datasetdict(A , A ) @pytest.mark.parametrize("split" , [None, 
NamedSplit("train" ), "train", "test"] ) def __a ( A , A , A ): '''simple docstring''' if split: lowercase__ = {split: text_path} else: lowercase__ = "train" lowercase__ = {"train": text_path, "test": text_path} lowercase__ = tmp_path / "cache" lowercase__ = {"text": "string"} lowercase__ = TextDatasetReader(A , cache_dir=A ).read() _check_text_datasetdict(A , A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
"""simple docstring""" lowerCAmelCase_: Union[str, Any] = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] lowerCAmelCase_: List[str] = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] lowerCAmelCase_: List[str] = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] lowerCAmelCase_: Dict = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] lowerCAmelCase_: Optional[int] = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] lowerCAmelCase_: Tuple = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] lowerCAmelCase_: str = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] lowerCAmelCase_: int = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
"""simple docstring""" def __a ( A , A ): '''simple docstring''' lowercase__ = len(A ) + 1 lowercase__ = len(A ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. lowercase__ = [[0 for i in range(A )] for j in range(A )] # since string of zero length match pattern of zero length lowercase__ = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , A ): lowercase__ = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , A ): lowercase__ = dp[0][j - 2] if pattern[j - 1] == "*" else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , A ): for j in range(1 , A ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": lowercase__ = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: lowercase__ = 1 elif pattern[j - 2] in (input_string[i - 1], "."): lowercase__ = dp[i - 1][j] else: lowercase__ = 0 else: lowercase__ = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") lowerCAmelCase_: List[str] = "aab" lowerCAmelCase_: Any = "c*a*b" # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(F'{input_string} matches the given pattern {pattern}') else: print(F'{input_string} does not match with the given pattern {pattern}')
"""simple docstring""" from __future__ import annotations def __a ( A , A ): '''simple docstring''' if partitions <= 0: raise ValueError("partitions must be a positive number!" ) if partitions > number_of_bytes: raise ValueError("partitions can not > number_of_bytes!" ) lowercase__ = number_of_bytes // partitions lowercase__ = [] for i in range(A ): lowercase__ = i * bytes_per_partition + 1 lowercase__ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_: List[Any] = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: List[Any] = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys lowerCAmelCase_: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from collections import deque class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = process_name # process name lowercase__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time lowercase__ = arrival_time lowercase__ = burst_time # remaining burst time lowercase__ = 0 # total time of the process wait in ready queue lowercase__ = 0 # time from arrival time to completion time class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ): '''simple docstring''' lowercase__ = number_of_queues # time slice of queues that round robin algorithm applied lowercase__ = time_slices # unfinished process is in this ready_queue lowercase__ = queue # current time lowercase__ = current_time # finished process is in this sequence queue lowercase__ = deque() def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return [q.burst_time for q in queue] def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of finished process while len(_UpperCAmelCase ) != 0: lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 lowercase__ = 0 # set the process's turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # set the completion time lowercase__ = self.current_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_UpperCAmelCase ) ): lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > 
time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time lowercase__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished lowercase__ = 0 # set the finish time lowercase__ = self.current_time # update the process' turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def snake_case__ ( self ): '''simple docstring''' for i in range(self.number_of_queues - 1 ): lowercase__ , lowercase__ = self.round_robin( self.ready_queue, self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3) lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7) lowerCAmelCase_: str = Process("P3", 0, 6_8) lowerCAmelCase_: int = Process("P4", 0, 2_4) lowerCAmelCase_: Dict = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])}) lowerCAmelCase_: Any = Process("P1", 0, 5_3) lowerCAmelCase_: Tuple = Process("P2", 0, 1_7) lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8) lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4) lowerCAmelCase_: Union[str, Any] = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa]) lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0) lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( F'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( F'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
"""simple docstring""" import functools def __a ( A , A ): '''simple docstring''' lowercase__ = len(A ) lowercase__ = len(A ) @functools.cache def min_distance(A , A ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowercase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , A ) , 1 + min_distance(A , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase_: Dict = "pt" elif is_tf_available(): lowerCAmelCase_: Dict = "tf" else: lowerCAmelCase_: str = "jax" class a__ ( _a , unittest.TestCase ): snake_case_ = ByTaTokenizer snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() lowercase__ = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case__ ( self ): '''simple docstring''' return ByTaTokenizer.from_pretrained("google/byt5-small" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): try: lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) ) lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) ) if max_length is not None and len(_UpperCAmelCase ) > max_length: lowercase__ = toks[:max_length] if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0: while len(_UpperCAmelCase ) < min_length: lowercase__ = toks + toks # toks_str = [t[1] for t in toks] lowercase__ = [t[0] for t in toks] # Ensure consistency lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase ) if " " not in output_txt and len(_UpperCAmelCase ) > 1: lowercase__ = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase ) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase ) ) if with_prefix_space: lowercase__ = " " + output_txt lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) return output_txt, output_ids def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] ) lowercase__ = tokenizer(["hi", "I went to the gym", ""] ) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = "Unicode €." 
lowercase__ = tokenizer(_UpperCAmelCase ) lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" ) lowercase__ = tokenizer("e è é ê ë" ) lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) if FRAMEWORK != "jax": lowercase__ = list(batch.input_ids.numpy()[0] ) else: lowercase__ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", _UpperCAmelCase ) self.assertIn("attention_mask", _UpperCAmelCase ) self.assertNotIn("decoder_input_ids", _UpperCAmelCase ) self.assertNotIn("decoder_attention_mask", _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = [ "Summary of the text.", "Another summary.", ] lowercase__ = tokenizer( text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertEqual(32, targets["input_ids"].shape[1] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization. </s>"] lowercase__ = ["Summary of the text. 
</s>"] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] ) self.assertEqual(_UpperCAmelCase, batch["labels"][0] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) shutil.rmtree(_UpperCAmelCase ) lowercase__ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) lowercase__ = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )] lowercase__ = added_tokens_extra_ids + [ "an_additional_special_token" ] lowercase__ = added_tokens_extra_ids + [ 
"an_additional_special_token" ] with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )] lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase ) self.assertTrue(tokenizer.decode([255] ) == "" ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] lowercase__ = 0 lowercase__ = tokenizer.convert_ids_to_tokens( _UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) for attr in attributes_list: setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) 
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
"""simple docstring""" from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand lowerCAmelCase_: str = logging.get_logger(__name__) # pylint: disable=invalid-name def __a ( A ): '''simple docstring''' if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(A ): return ext raise Exception( f'''Unable to determine file format from file extension {path}. ''' f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' ) def __a ( A ): '''simple docstring''' lowercase__ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) lowercase__ = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format lowercase__ = PipelineDataFormat.from_str( format=A , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(A , A ) class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = nlp lowercase__ = reader @staticmethod def snake_case__ ( _UpperCAmelCase ): '''simple docstring''' lowercase__ = parser.add_parser("run", help="Run a pipeline through the CLI" ) run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run" ) run_parser.add_argument("--input", type=_UpperCAmelCase, help="Path to the file to use for inference" ) run_parser.add_argument("--output", type=_UpperCAmelCase, help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model", type=_UpperCAmelCase, help="Name or path to the model to instantiate." ) run_parser.add_argument("--config", type=_UpperCAmelCase, help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer", type=_UpperCAmelCase, help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column", type=_UpperCAmelCase, help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)", ) run_parser.add_argument( "--format", type=_UpperCAmelCase, default="infer", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="Input format to read from", ) run_parser.add_argument( "--device", type=_UpperCAmelCase, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", ) run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file." ) run_parser.set_defaults(func=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self._nlp, [] for entry in self._reader: lowercase__ = nlp(**_UpperCAmelCase ) if self._reader.is_multi_columns else nlp(_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ): outputs.append(_UpperCAmelCase ) else: outputs += output # Saving data if self._nlp.binary_output: lowercase__ = self._reader.save_binary(_UpperCAmelCase ) logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' ) else: self._reader.save(_UpperCAmelCase )
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class a__ ( unittest.TestCase ): snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 ) lowercase__ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' for example in examples: lowercase__ = video_classifier(_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase, [ {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, ], ) @require_torch def snake_case__ ( self ): '''simple docstring''' lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" lowercase__ = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) lowercase__ = pipeline( "video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 ) lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ) lowercase__ = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [ [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ], ) @require_tf def snake_case__ ( self ): '''simple docstring''' pass
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __a ( A ): '''simple docstring''' lowercase__ = filter(lambda A : p.requires_grad , model.parameters() ) lowercase__ = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_: str = logging.getLogger(__name__) def __a ( A , A ): '''simple docstring''' if metric == "rouge2": lowercase__ = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": lowercase__ = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": lowercase__ = "{val_avg_em:.4f}-{step_count}" elif metric == "loss": lowercase__ = "{val_avg_loss:.4f}-{step_count}" else: raise NotImplementedError( f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' " function." ) lowercase__ = ModelCheckpoint( dirpath=A , filename=A , monitor=f'''val_{metric}''' , mode="max" , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def __a ( A , A ): '''simple docstring''' return EarlyStopping( monitor=f'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=A , verbose=A , ) class a__ ( pl.Callback ): def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = {F'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_UpperCAmelCase ) @rank_zero_only def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=True ): '''simple docstring''' logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) lowercase__ = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results lowercase__ = Path(pl_module.hparams.output_dir ) if type_path == "test": lowercase__ = od / "test_results.txt" lowercase__ = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowercase__ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt''' lowercase__ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=_UpperCAmelCase ) generations_file.parent.mkdir(exist_ok=_UpperCAmelCase ) with open(_UpperCAmelCase, "a+" ) as writer: for key in sorted(_UpperCAmelCase ): if key in ["log", "progress_bar", "preds"]: continue lowercase__ = metrics[key] if isinstance(_UpperCAmelCase, torch.Tensor ): lowercase__ = val.item() lowercase__ = F'''{key}: {val:.6f}\n''' writer.write(_UpperCAmelCase ) if not save_generations: return if "preds" in metrics: lowercase__ = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(_UpperCAmelCase ) @rank_zero_only def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' try: lowercase__ = pl_module.model.model.num_parameters() except AttributeError: lowercase__ = pl_module.model.num_parameters() lowercase__ = count_trainable_parameters(_UpperCAmelCase ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' save_json(pl_module.metrics, pl_module.metrics_save_path ) return self._write_logs(_UpperCAmelCase, _UpperCAmelCase, "test" ) @rank_zero_only def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' save_json(pl_module.metrics, pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
"""simple docstring""" import itertools import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __a ( ): '''simple docstring''' lowercase__ = 2 while True: if is_prime(A ): yield num num += 1 def __a ( A = 1_00_01 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , A ) ) if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" from math import factorial def __a ( A , A ): '''simple docstring''' if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k" ) return factorial(A ) // (factorial(A ) * factorial(n - k )) if __name__ == "__main__": print( "The number of five-card hands possible from a standard", F'fifty-two card deck is: {combinations(5_2, 5)}\n', ) print( "If a class of 40 students must be arranged into groups of", F'4 for group projects, there are {combinations(4_0, 4)} ways', "to arrange them.\n", ) print( "If 10 teams are competing in a Formula One race, there", F'are {combinations(1_0, 3)} ways that first, second and', "third place can be awarded.", )
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( _UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths} lowercase__ = Text( cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, ) def snake_case__ ( self ): '''simple docstring''' if self.streaming: lowercase__ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None self.builder.download_and_prepare( download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, ) lowercase__ = self.builder.as_dataset( split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory ) return dataset
"""simple docstring""" import itertools import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __a ( ): '''simple docstring''' lowercase__ = 2 while True: if is_prime(A ): yield num num += 1 def __a ( A = 1_00_01 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , A ) ) if __name__ == "__main__": print(F'{solution() = }')
668
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCAmelCase_: List[str] = 1_6 lowerCAmelCase_: Optional[Any] = 3_2 def __a ( A , A = 16 , A = "bert-base-cased" ): '''simple docstring''' lowercase__ = AutoTokenizer.from_pretrained(A ) lowercase__ = load_dataset("glue" , "mrpc" ) def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ = datasets.map( A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(A , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. lowercase__ = DataLoader( tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A ) lowercase__ = DataLoader( tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A ) return train_dataloader, eval_dataloader def __a ( A , A ): '''simple docstring''' lowercase__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ = config["lr"] lowercase__ = int(config["num_epochs"] ) lowercase__ = int(config["seed"] ) lowercase__ = int(config["batch_size"] ) lowercase__ = args.model_name_or_path set_seed(A ) lowercase__ , lowercase__ = get_dataloaders(A , A , A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A ) # Instantiate optimizer lowercase__ = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ = optimizer_cls(params=model.parameters() , lr=A ) if accelerator.state.deepspeed_plugin is not None: lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: lowercase__ = 1 lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ = get_linear_schedule_with_warmup( optimizer=A , num_warmup_steps=0 , num_training_steps=A , ) else: lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare( A , A , A , A , A ) # We need to keep track of how many total steps we have iterated over lowercase__ = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ = 0 # Now we train the model lowercase__ = evaluate.load("glue" , "mrpc" ) lowercase__ = 0 lowercase__ = {} for epoch in range(A , A ): model.train() for step, batch in enumerate(A ): lowercase__ = model(**A ) lowercase__ = outputs.loss lowercase__ = loss / gradient_accumulation_steps accelerator.backward(A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowercase__ = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ = model(**A ) lowercase__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowercase__ , lowercase__ = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A ) - 1: lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A , references=A , ) lowercase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , A ) lowercase__ = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: lowercase__ = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(A , A ) def __a ( ): '''simple docstring''' lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , ) parser.add_argument( "--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=A , default=3 , help="Number of train epochs." , ) lowercase__ = parser.parse_args() lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(A , A ) if __name__ == "__main__": main()
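# A typical launch sketch for the training script above (the script file name
# and the DeepSpeed/accelerate config file are placeholders):
#
# accelerate launch --config_file deepspeed_config.yaml performance_check.py \
#     --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .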
"""simple docstring""" def __a ( A ): '''simple docstring''' lowercase__ = [0] * len(A ) for i in range(1 , len(A ) ): # use last results for better performance - dynamic programming lowercase__ = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowercase__ = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowercase__ = j return prefix_result def __a ( A ): '''simple docstring''' return max(prefix_function(A ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _a ): snake_case_ = (IPNDMScheduler,) snake_case_ = (("num_inference_steps", 50),) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = {"num_train_timesteps": 1000} config.update(**_UpperCAmelCase ) return config def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase 
).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = 10 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample return sample def snake_case__ ( self ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ): lowercase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.timesteps[5] lowercase__ = scheduler.timesteps[6] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def snake_case__ ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 254_0529 ) < 10
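# Note (added for clarity): IPNDMScheduler is a multi-step method, so step()
# consumes a buffer of past model residuals held on the scheduler (called `ets`
# in the diffusers implementation). That is why each test above copies the dummy
# residuals onto the scheduler only *after* set_timesteps(), which resets that
# internal state.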
668
1
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=99, _UpperCAmelCase=32, _UpperCAmelCase=2, _UpperCAmelCase=4, _UpperCAmelCase=37, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=16, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=3, _UpperCAmelCase=4, _UpperCAmelCase=None, ): '''simple docstring''' lowercase__ = parent lowercase__ = 13 lowercase__ = 7 lowercase__ = True lowercase__ = True lowercase__ = True lowercase__ = True lowercase__ = 99 lowercase__ = 384 lowercase__ = 2 lowercase__ = 4 lowercase__ = 37 lowercase__ = "gelu" lowercase__ = 0.1 lowercase__ = 0.1 lowercase__ = 512 lowercase__ = 16 lowercase__ = 2 lowercase__ = 0.02 lowercase__ = 3 lowercase__ = 4 lowercase__ = 128 lowercase__ = 2 lowercase__ = 9 lowercase__ = 1 lowercase__ = None def snake_case__ ( self ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size ) lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowercase__ = ids_tensor([self.batch_size], self.num_choices ) lowercase__ = ConvBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=_UpperCAmelCase, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = TFConvBertModel(config=_UpperCAmelCase ) lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase__ = [input_ids, input_mask] lowercase__ = model(_UpperCAmelCase ) lowercase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, 
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = TFConvBertForMaskedLM(config=_UpperCAmelCase ) lowercase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } lowercase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) lowercase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } lowercase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = self.num_choices lowercase__ = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) lowercase__ = tf.tile(tf.expand_dims(_UpperCAmelCase, 1 ), (1, self.num_choices, 1) ) lowercase__ = tf.tile(tf.expand_dims(_UpperCAmelCase, 1 ), (1, self.num_choices, 1) ) lowercase__ = tf.tile(tf.expand_dims(_UpperCAmelCase, 1 ), (1, self.num_choices, 1) ) lowercase__ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowercase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = TFConvBertForTokenClassification(config=_UpperCAmelCase ) lowercase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } lowercase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) lowercase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } lowercase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class a__ ( _a , _a , unittest.TestCase ): snake_case_ = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) snake_case_ = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": 
TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False def snake_case__ ( self ): '''simple docstring''' lowercase__ = TFConvBertModelTester(self ) lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, hidden_size=37 ) def snake_case__ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = True lowercase__ = True if hasattr(_UpperCAmelCase, "use_cache" ): lowercase__ = True lowercase__ = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length ) lowercase__ = getattr(self.model_tester, "key_length", _UpperCAmelCase ) for model_class in self.all_model_classes: lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase, saved_model=_UpperCAmelCase ) lowercase__ = os.path.join(_UpperCAmelCase, "saved_model", "1" ) lowercase__ = tf.keras.models.load_model(_UpperCAmelCase ) lowercase__ = model(_UpperCAmelCase ) if self.is_encoder_decoder: lowercase__ = outputs["encoder_hidden_states"] lowercase__ = outputs["encoder_attentions"] else: lowercase__ = outputs["hidden_states"] lowercase__ = outputs["attentions"] self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase ) lowercase__ = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(_UpperCAmelCase ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) @slow def snake_case__ ( self ): '''simple docstring''' lowercase__ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' 
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = True lowercase__ = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length ) lowercase__ = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length ) lowercase__ = getattr(self.model_tester, "key_length", _UpperCAmelCase ) lowercase__ = getattr(self.model_tester, "key_length", _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase ): lowercase__ = len(_UpperCAmelCase ) self.assertEqual(out_len % 2, 0 ) lowercase__ = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], ) def check_encoder_attentions_output(_UpperCAmelCase ): lowercase__ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) for model_class in self.all_model_classes: lowercase__ = True lowercase__ = False lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model(self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) ) lowercase__ = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states, _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model(self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states, _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowercase__ = True lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model(self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states, _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always last and order is fine lowercase__ = True lowercase__ = True lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = model(self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states, _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class a__ ( unittest.TestCase ): @slow def snake_case__ ( self ): '''simple docstring''' lowercase__ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) lowercase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowercase__ = model(_UpperCAmelCase )[0] lowercase__ = [1, 6, 768] self.assertEqual(output.shape, _UpperCAmelCase ) lowercase__ = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3], _UpperCAmelCase, atol=1E-4 )
668
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( _a , unittest.TestCase ): snake_case_ = MgpstrTokenizer snake_case_ = False snake_case_ = {} snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() # fmt: off lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = "tester" lowercase__ = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ), 1 ) lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase ) lowercase__ = tokenizer.tokenize(_UpperCAmelCase ) lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ), 0 ) lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def snake_case__ ( self ): '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def snake_case__ ( self ): '''simple docstring''' pass
668
1
"""simple docstring""" def __a ( A ): '''simple docstring''' return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print("Program to check whether a number is a Perfect number or not...") lowerCAmelCase_: str = int(input("Enter number: ").strip()) print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
668
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
668
1
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( _UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths} lowercase__ = Text( cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, ) def snake_case__ ( self ): '''simple docstring''' if self.streaming: lowercase__ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None self.builder.download_and_prepare( download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, ) lowercase__ = self.builder.as_dataset( split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory ) return dataset
668
"""simple docstring""" from typing import Any import numpy as np def __a ( A ): '''simple docstring''' return np.array_equal(A , matrix.conjugate().T ) def __a ( A , A ): '''simple docstring''' lowercase__ = v.conjugate().T lowercase__ = v_star.dot(A ) assert isinstance(A , np.ndarray ) return (v_star_dot.dot(A )) / (v_star.dot(A )) def __a ( ): '''simple docstring''' lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) lowercase__ = np.array([[1], [2], [3]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' print(rayleigh_quotient(A , A ) ) lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' assert rayleigh_quotient(A , A ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
668
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_: List[Any] = logging.get_logger(__name__) lowerCAmelCase_: List[Any] = {"vocab_file": "sentencepiece.bpe.model"} lowerCAmelCase_: Optional[int] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", } } lowerCAmelCase_: str = { "camembert-base": 5_1_2, } lowerCAmelCase_: Optional[Any] = "▁" class a__ ( _a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self, _UpperCAmelCase, _UpperCAmelCase="<s>", _UpperCAmelCase="</s>", _UpperCAmelCase="</s>", _UpperCAmelCase="<s>", _UpperCAmelCase="<unk>", _UpperCAmelCase="<pad>", _UpperCAmelCase="<mask>", _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"], _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' lowercase__ = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, ) lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) lowercase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowercase__ = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} lowercase__ = len(self.fairseq_tokens_to_ids ) lowercase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case__ ( self ): '''simple docstring''' return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} 
vocab.update(self.added_tokens_encoder ) return vocab def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_UpperCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] lowercase__ = "" lowercase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCAmelCase ) + token lowercase__ = True lowercase__ = [] else: current_sub_tokens.append(_UpperCAmelCase ) lowercase__ = False out_string += self.sp_model.decode(_UpperCAmelCase ) return out_string.strip() def __getstate__( self ): '''simple docstring''' lowercase__ = self.__dict__.copy() lowercase__ = None return state def __setstate__( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs" ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase__ = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase, "wb" ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,)
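# A minimal usage sketch (added; assumes the `transformers` package, where this
# tokenizer is exported as CamembertTokenizer, and the "camembert-base"
# checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP above):
if __name__ == "__main__":
    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    ids = tokenizer.encode("J'aime le camembert")
    # encode() applies build_inputs_with_special_tokens: <s> ... </s>
    assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id
    print(tokenizer.decode(ids))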
668
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class a__ ( _a , unittest.TestCase ): snake_case_ = PriorTransformer snake_case_ = "hidden_states" @property def snake_case__ ( self ): '''simple docstring''' lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) def snake_case__ ( self ): '''simple docstring''' lowercase__ = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } lowercase__ = self.dummy_input return init_dict, inputs_dict def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ), 0 ) model.to(_UpperCAmelCase ) lowercase__ = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common() lowercase__ = self.model_class(**_UpperCAmelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ) lowercase__ = model.to(_UpperCAmelCase ) if hasattr(_UpperCAmelCase, "set_default_attn_processor" ): model.set_default_attn_processor() lowercase__ = self.get_dummy_seed_input() with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] lowercase__ = output[0, :5].flatten().cpu() print(_UpperCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) ) @slow class a__ ( unittest.TestCase ): def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = batch_size lowercase__ = embedding_dim lowercase__ = num_embeddings lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" ) model.to(_UpperCAmelCase ) lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase ) with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] assert list(sample.shape ) == [1, 768] lowercase__ = sample[0, :8].flatten().cpu() print(_UpperCAmelCase ) lowercase__ = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
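# Note (added): the @parameterized.expand cases above pin per-seed output
# slices; get_dummy_seed_input() reseeds torch's RNG so the random inputs are
# reproducible, which is what makes comparing against hard-coded tensors valid.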
668
1
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder lowerCAmelCase_: Union[str, Any] = "base_with_context" def __a ( A , A ): '''simple docstring''' lowercase__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) ) lowercase__ = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A ) for lyr_num, lyr in enumerate(model.encoders ): lowercase__ = weights[f'''layers_{lyr_num}'''] lowercase__ = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) lowercase__ = ly_weight["attention"] lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def __a ( A , A ): '''simple docstring''' lowercase__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) ) lowercase__ = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A ) for lyr_num, lyr in enumerate(model.encoders ): lowercase__ = weights[f'''layers_{lyr_num}'''] lowercase__ = ly_weight["attention"] lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) lowercase__ = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) lowercase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def __a ( A , A ): '''simple docstring''' lowercase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) ) lowercase__ = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A ) lowercase__ = nn.Parameter( torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) ) for lyr_num, lyr in enumerate(model.decoders ): lowercase__ = weights[f'''layers_{lyr_num}'''] lowercase__ = nn.Parameter( 
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) ) lowercase__ = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) ) lowercase__ = ly_weight["self_attention"] lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) lowercase__ = ly_weight["MultiHeadDotProductAttention_0"] lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) lowercase__ = nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) lowercase__ = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) lowercase__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) ) lowercase__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) ) return model def __a ( A ): '''simple docstring''' lowercase__ = checkpoints.load_tax_checkpoint(args.checkpoint_path ) lowercase__ = jnp.tree_util.tree_map(onp.array , A ) lowercase__ = [ "from __gin__ import dynamic_registration", "from music_spectrogram_diffusion.models.diffusion import diffusion_utils", "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0", "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()", ] lowercase__ = os.path.join(args.checkpoint_path , ".." 
, "config.gin" ) lowercase__ = inference.parse_training_gin_file(A , A ) lowercase__ = inference.InferenceModel(args.checkpoint_path , A ) lowercase__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" ) lowercase__ = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) lowercase__ = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) lowercase__ = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) lowercase__ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , A ) lowercase__ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , A ) lowercase__ = load_decoder(ta_checkpoint["target"]["decoder"] , A ) lowercase__ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" ) lowercase__ = SpectrogramDiffusionPipeline( notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": lowerCAmelCase_: Optional[int] = argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F'{MODEL}/checkpoint_500000', type=str, required=False, help="Path to the original jax model checkpoint.", ) lowerCAmelCase_: Union[str, Any] = parser.parse_args() main(args)
668
"""simple docstring""" lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def __a ( A ): '''simple docstring''' if not isinstance(A , A ): lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ = len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ = b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ = b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def __a ( A ): '''simple docstring''' if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ = ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ = encoded_data[:-padding] lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
668
1
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType lowerCAmelCase_: Optional[Any] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = "vision-encoder-decoder" snake_case_ = True def __init__( self, **_UpperCAmelCase ): '''simple docstring''' super().__init__(**_UpperCAmelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F'''A configuraton of type {self.model_type} cannot be instantiated because ''' F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) lowercase__ = kwargs.pop("encoder" ) lowercase__ = encoder_config.pop("model_type" ) lowercase__ = kwargs.pop("decoder" ) lowercase__ = decoder_config.pop("model_type" ) lowercase__ = AutoConfig.for_model(_UpperCAmelCase, **_UpperCAmelCase ) lowercase__ = AutoConfig.for_model(_UpperCAmelCase, **_UpperCAmelCase ) lowercase__ = True @classmethod def snake_case__ ( cls, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ): '''simple docstring''' logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" ) lowercase__ = True lowercase__ = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = copy.deepcopy(self.__dict__ ) lowercase__ = self.encoder.to_dict() lowercase__ = self.decoder.to_dict() lowercase__ = self.__class__.model_type return output class a__ ( _a ): snake_case_ = version.parse("1.11" ) @property def snake_case__ ( self ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def snake_case__ ( self ): '''simple docstring''' return 1E-4 @property def snake_case__ ( self ): '''simple docstring''' return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} ) class a__ ( _a ): @property def snake_case__ ( self ): '''simple docstring''' lowercase__ = OrderedDict() lowercase__ = {0: "batch", 1: "past_decoder_sequence + sequence"} lowercase__ = {0: "batch", 1: "past_decoder_sequence + sequence"} lowercase__ = {0: "batch", 1: "encoder_sequence"} return common_inputs def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = -1, _UpperCAmelCase = -1, _UpperCAmelCase = False, _UpperCAmelCase = None, ): '''simple docstring''' import torch lowercase__ = OrderedDict() lowercase__ = super().generate_dummy_inputs( _UpperCAmelCase, batch_size=_UpperCAmelCase, seq_length=_UpperCAmelCase, is_pair=_UpperCAmelCase, framework=_UpperCAmelCase ) lowercase__ , lowercase__ = dummy_input["input_ids"].shape lowercase__ = (batch, encoder_sequence, self._config.encoder_hidden_size) lowercase__ = dummy_input.pop("input_ids" ) lowercase__ = dummy_input.pop("attention_mask" ) lowercase__ = torch.zeros(_UpperCAmelCase ) return common_inputs class a__ ( _a ): @property def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = "default" ): '''simple docstring''' lowercase__ = encoder_config.hidden_size return 
VisionEncoderDecoderDecoderOnnxConfig(_UpperCAmelCase, _UpperCAmelCase )
668
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ): '''simple docstring''' lowercase__ = symbols(A ) lowercase__ = lambdify(A , A ) lowercase__ = lambdify(A , diff(A , A ) ) lowercase__ = starting_point while True: if diff_function(A ) != 0: lowercase__ = prev_guess - multiplicity * func(A ) / diff_function( A ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase__ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial # Find fourth Root of 5 print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}') # Find value of e print( "The root of log(y) - 1 = 0 is ", F'{newton_raphson("log(y) - 1", 2, variable="y")}', ) # Exponential Roots print( "The root of exp(x) - 1 = 0 is", F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}', ) # Find root of cos(x) print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
668
1
"""simple docstring""" from typing import Any def __a ( A , A , A , A , A , ): '''simple docstring''' _validation( A , A , A , A , A , ) # Creates data structures and fill initial step lowercase__ = {} lowercase__ = {} for state in states_space: lowercase__ = observations_space[0] lowercase__ = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowercase__ = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(A ) ): lowercase__ = observations_space[o] lowercase__ = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowercase__ = "" lowercase__ = -1 for k_state in states_space: lowercase__ = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowercase__ = probability lowercase__ = k_state # Update probabilities and pointers dicts lowercase__ = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowercase__ = arg_max # The final observation lowercase__ = observations_space[len(A ) - 1] # argmax for given final observation lowercase__ = "" lowercase__ = -1 for k_state in states_space: lowercase__ = probabilities[(k_state, final_observation)] if probability > max_probability: lowercase__ = probability lowercase__ = k_state lowercase__ = arg_max # Process pointers backwards lowercase__ = last_state lowercase__ = [] for o in range(len(A ) - 1 , -1 , -1 ): result.append(A ) lowercase__ = pointers[previous, observations_space[o]] result.reverse() return result def __a ( A , A , A , A , A , ): '''simple docstring''' _validate_not_empty( A , A , A , A , A , ) _validate_lists(A , A ) _validate_dicts( A , A , A ) def __a ( A , A , A , A , A , ): '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("There's an empty parameter" ) def __a ( A , A ): '''simple docstring''' _validate_list(A , "observations_space" ) _validate_list(A , "states_space" ) def __a ( A , A ): '''simple docstring''' if not isinstance(_object , A ): lowercase__ = f'''{var_name} must be a list''' raise ValueError(A ) else: for x in _object: if not isinstance(A , A ): lowercase__ = f'''{var_name} must be a list of strings''' raise ValueError(A ) def __a ( A , A , A , ): '''simple docstring''' _validate_dict(A , "initial_probabilities" , A ) _validate_nested_dict(A , "transition_probabilities" ) _validate_nested_dict(A , "emission_probabilities" ) def __a ( A , A ): '''simple docstring''' _validate_dict(_object , A , A ) for x in _object.values(): _validate_dict(A , A , A , A ) def __a ( A , A , A , A = False ): '''simple docstring''' if not isinstance(_object , A ): lowercase__ = f'''{var_name} must be a dict''' raise ValueError(A ) if not all(isinstance(A , A ) for x in _object ): lowercase__ = f'''{var_name} all keys must be strings''' raise ValueError(A ) if not all(isinstance(A , A ) for x in _object.values() ): lowercase__ = "nested dictionary " if nested else "" lowercase__ = f'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(A ) if __name__ == "__main__": from doctest import testmod testmod()
668
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_: Union[str, Any] = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Any = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Tuple = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Optional[Any] = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase_: Optional[int] = 
_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
668
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase_: List[str] = { "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Optional[int] = [ "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", "MegaForCausalLM", "MegaForMaskedLM", "MegaForMultipleChoice", "MegaForQuestionAnswering", "MegaForSequenceClassification", "MegaForTokenClassification", "MegaModel", "MegaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys lowerCAmelCase_: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
668
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = ["audio_values", "audio_mask"] def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = spectrogram_length lowercase__ = num_channels lowercase__ = patch_size lowercase__ = feature_size // self.patch_size[1] lowercase__ = n_fft lowercase__ = sampling_rate // hop_length_to_sampling_rate lowercase__ = sampling_rate lowercase__ = padding_value lowercase__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = spectrogram( _UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, ) lowercase__ = log_spec[:, :-1] lowercase__ = log_spec - 20.0 lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ = is_batched_numpy or ( isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ): lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa ) elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowercase__ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], _UpperCAmelCase ): lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowercase__ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowercase__ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa ) # convert into correct format for padding lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowercase__ = padded_audio_features * self.padding_value for i in range(len(_UpperCAmelCase ) ): lowercase__ = audio_features[i] lowercase__ = feature # return as BatchFeature if return_attention_mask: lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: lowercase__ = {"audio_values": padded_audio_features} lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase ) return encoded_inputs
668
1
"""simple docstring""" import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _a ): snake_case_ = (DDIMParallelScheduler,) snake_case_ = (("eta", 0.0), ("num_inference_steps", 50)) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = { "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**_UpperCAmelCase ) return config def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ , lowercase__ = 10, 0.0 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for t in scheduler.timesteps: lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample return sample def snake_case__ ( self ): '''simple docstring''' for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(steps_offset=1 ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1] ) ) def snake_case__ ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_UpperCAmelCase, beta_end=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' self.check_over_configs(thresholding=_UpperCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_UpperCAmelCase, prediction_type=_UpperCAmelCase, sample_max_value=_UpperCAmelCase, ) def snake_case__ ( self ): '''simple docstring''' for t in [1, 10, 49]: self.check_over_forward(time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500] ): self.check_over_forward(time_step=_UpperCAmelCase, num_inference_steps=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_UpperCAmelCase, eta=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() 
lowercase__ = scheduler_class(**_UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(420, 400 ) - 0.14_771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(980, 960 ) - 0.32_460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487, 486 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999, 998 ) - 0.02 ) ) < 1E-5 def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ , lowercase__ = 10, 0.0 scheduler.set_timesteps(_UpperCAmelCase ) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter lowercase__ = self.dummy_sample_deter + 0.1 lowercase__ = self.dummy_sample_deter - 0.1 lowercase__ = samplea.shape[0] lowercase__ = torch.stack([samplea, samplea, samplea], dim=0 ) lowercase__ = torch.arange(_UpperCAmelCase )[0:3, None].repeat(1, _UpperCAmelCase ) lowercase__ = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) ) lowercase__ = scheduler.batch_step_no_noise(_UpperCAmelCase, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ), _UpperCAmelCase ) lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 1_147.7_904 ) < 1E-2 assert abs(result_mean.item() - 0.4_982 ) < 1E-3 def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 172.0_067 ) < 1E-2 assert abs(result_mean.item() - 0.223_967 ) < 1E-3 def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop(prediction_type="v_prediction" ) lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 52.5_302 ) < 1E-2 assert abs(result_mean.item() - 0.0_684 ) < 1E-3 def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase, beta_start=0.01 ) lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 149.8_295 ) < 1E-2 assert abs(result_mean.item() - 0.1_951 ) < 1E-3 def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase, beta_start=0.01 ) lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 149.0_784 ) < 1E-2 assert abs(result_mean.item() - 0.1_941 ) < 1E-3
668
"""simple docstring""" from __future__ import annotations import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def __a ( A ): '''simple docstring''' if not isinstance(A , A ): raise ValueError("n must be an integer" ) if n <= 0: raise ValueError("n must be >= 0" ) lowercase__ = [] for num in range(len(A ) ): lowercase__ = 0 while 2 * i * i <= odd_composites[num]: lowercase__ = odd_composites[num] - 2 * i * i if is_prime(A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(A ) == n: return list_nums return [] def __a ( ): '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
668
1
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ): '''simple docstring''' lowercase__ = symbols(A ) lowercase__ = lambdify(A , A ) lowercase__ = lambdify(A , diff(A , A ) ) lowercase__ = starting_point while True: if diff_function(A ) != 0: lowercase__ = prev_guess - multiplicity * func(A ) / diff_function( A ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase__ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial # Find fourth Root of 5 print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}') # Find value of e print( "The root of log(y) - 1 = 0 is ", F'{newton_raphson("log(y) - 1", 2, variable="y")}', ) # Exponential Roots print( "The root of exp(x) - 1 = 0 is", F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}', ) # Find root of cos(x) print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
668
"""simple docstring""" import os import sys lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) lowerCAmelCase_: Union[str, Any] = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoConfig.from_pretrained(*A , **A ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A , **A ) @add_start_docstrings(AutoModel.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModel.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
668
1
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
668
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class a__ ( unittest.TestCase ): @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ): lowercase__ = FlaxAutoModel.from_pretrained("bert-base" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
668
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCAmelCase_: Tuple = logging.getLogger(__name__) @dataclass class a__ : snake_case_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) snake_case_ = field( default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) snake_case_ = field( default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) snake_case_ = field( default=_a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) snake_case_ = field(default=_a , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. snake_case_ = field( default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class a__ : snake_case_ = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) snake_case_ = field( default=_a , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , ) snake_case_ = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) snake_case_ = field( default=_a , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __a ( ): '''simple docstring''' lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) lowercase__ = import_module("tasks" ) try: lowercase__ = getattr(A , model_args.task_type ) lowercase__ = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task lowercase__ = token_classification_task.get_labels(data_args.labels ) lowercase__ = dict(enumerate(A ) ) lowercase__ = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) lowercase__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) lowercase__ = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets lowercase__ = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase__ = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A , A ) -> Tuple[List[int], List[int]]: lowercase__ = np.argmax(A , axis=2 ) lowercase__ , lowercase__ = preds.shape lowercase__ = [[] for _ in range(A )] lowercase__ = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A ) -> Dict: lowercase__ , lowercase__ = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator lowercase__ = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase__ = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( 
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase__ = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) lowercase__ = trainer.evaluate() lowercase__ = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_process_zero(): with open(A , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , A , A ) writer.write("%s = %s\n" % (key, value) ) results.update(A ) # Predict if training_args.do_predict: lowercase__ = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) lowercase__ , lowercase__ , lowercase__ = trainer.predict(A ) lowercase__ , lowercase__ = align_predictions(A , A ) lowercase__ = os.path.join(training_args.output_dir , "test_results.txt" ) if trainer.is_world_process_zero(): with open(A , "w" ) as writer: for key, value in metrics.items(): logger.info(" %s = %s" , A , A ) writer.write("%s = %s\n" % (key, value) ) # Save predictions lowercase__ = os.path.join(training_args.output_dir , "test_predictions.txt" ) if trainer.is_world_process_zero(): with open(A , "w" ) as writer: with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def __a ( A ): '''simple docstring''' main() if __name__ == "__main__": main()
668
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_: str = logging.get_logger(__name__) lowerCAmelCase_: List[Any] = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class a__ ( _a ): snake_case_ = "data2vec-vision" def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ): '''simple docstring''' super().__init__(**_UpperCAmelCase ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = use_mask_token lowercase__ = use_absolute_position_embeddings lowercase__ = use_relative_position_bias lowercase__ = use_shared_relative_position_bias lowercase__ = layer_scale_init_value lowercase__ = drop_path_rate lowercase__ = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__ = out_indices lowercase__ = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__ = use_auxiliary_head lowercase__ = auxiliary_loss_weight lowercase__ = auxiliary_channels lowercase__ = auxiliary_num_convs lowercase__ = auxiliary_concat_input lowercase__ = semantic_loss_ignore_index class a__ ( _a ): snake_case_ = version.parse("1.11" ) @property def snake_case__ ( self ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def snake_case__ ( self ): '''simple docstring''' return 1E-4
668
1
"""simple docstring""" import sys def __a ( A ): '''simple docstring''' lowercase__ = len(A ) lowercase__ = [[0 for x in range(A )] for x in range(A )] lowercase__ = [[0 for x in range(A )] for x in range(A )] for chain_length in range(2 , A ): for a in range(1 , n - chain_length + 1 ): lowercase__ = a + chain_length - 1 lowercase__ = sys.maxsize for c in range(A , A ): lowercase__ = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: lowercase__ = cost lowercase__ = c return matrix, sol def __a ( A , A , A ): '''simple docstring''' if i == j: print("A" + str(A ) , end=" " ) else: print("(" , end=" " ) print_optiomal_solution(A , A , optimal_solution[i][j] ) print_optiomal_solution(A , optimal_solution[i][j] + 1 , A ) print(")" , end=" " ) def __a ( ): '''simple docstring''' lowercase__ = [30, 35, 15, 5, 10, 20, 25] lowercase__ = len(A ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 lowercase__ , lowercase__ = matrix_chain_order(A ) print("No. of Operation required: " + str(matrix[1][n - 1] ) ) print_optiomal_solution(A , 1 , n - 1 ) if __name__ == "__main__": main()
668
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_: List[Any] = logging.get_logger(__name__) lowerCAmelCase_: int = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class a__ ( _a ): snake_case_ = "markuplm" def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout # additional properties lowercase__ = max_depth lowercase__ = max_xpath_tag_unit_embeddings lowercase__ = max_xpath_subs_unit_embeddings lowercase__ = tag_pad_id lowercase__ = subs_pad_id lowercase__ = xpath_unit_hidden_size
668
1
"""simple docstring""" import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC lowerCAmelCase_: List[Any] = parse(importlib.metadata.version("torch")) def __a ( A , A , A ): '''simple docstring''' if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' ) lowercase__ = STR_OPERATION_TO_FUNC[operation] if isinstance(A , A ): lowercase__ = parse(importlib.metadata.version(A ) ) return operation(A , parse(A ) ) def __a ( A , A ): '''simple docstring''' return compare_versions(A , A , A )
668
"""simple docstring""" lowerCAmelCase_: Union[str, Any] = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] lowerCAmelCase_: List[str] = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] lowerCAmelCase_: List[str] = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] lowerCAmelCase_: Dict = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] lowerCAmelCase_: Optional[int] = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] lowerCAmelCase_: Tuple = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] lowerCAmelCase_: str = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] lowerCAmelCase_: int = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 
9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
668
1
"""simple docstring""" import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class a__ ( unittest.TestCase ): def snake_case__ ( self ): '''simple docstring''' lowercase__ = mock.Mock() lowercase__ = 500 lowercase__ = {} lowercase__ = HTTPError lowercase__ = {} # Download this model to make sure it's in the cache. lowercase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=_UpperCAmelCase ) as mock_head: lowercase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def snake_case__ ( self ): '''simple docstring''' lowercase__ = mock.Mock() lowercase__ = 500 lowercase__ = {} lowercase__ = HTTPError lowercase__ = {} # Download this model to make sure it's in the cache. lowercase__ = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=_UpperCAmelCase ) as mock_head: lowercase__ = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def snake_case__ ( self ): '''simple docstring''' try: lowercase__ = tempfile.mktemp() with open(_UpperCAmelCase, "wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", _UpperCAmelCase ) lowercase__ = AlbertTokenizer.from_pretrained(_UpperCAmelCase ) finally: os.remove(_UpperCAmelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json", "wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", _UpperCAmelCase ) lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size, 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("tokenizer.json" ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class a__ ( unittest.TestCase ): snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def snake_case__ ( cls ): '''simple docstring''' lowercase__ = TOKEN HfFolder.save_token(_UpperCAmelCase ) @classmethod def snake_case__ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def snake_case__ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = os.path.join(_UpperCAmelCase, "vocab.txt" ) with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) lowercase__ = BertTokenizer(_UpperCAmelCase ) tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token ) lowercase__ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) # Reset repo delete_repo(token=self._token, repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCAmelCase, repo_id="test-tokenizer", push_to_hub=_UpperCAmelCase, use_auth_token=self._token ) lowercase__ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) def snake_case__ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = os.path.join(_UpperCAmelCase, "vocab.txt" ) with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) lowercase__ = BertTokenizer(_UpperCAmelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token ) lowercase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _UpperCAmelCase, repo_id="valid_org/test-tokenizer-org", push_to_hub=_UpperCAmelCase, use_auth_token=self._token ) lowercase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) @require_tokenizers def snake_case__ ( self ): '''simple docstring''' CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = os.path.join(_UpperCAmelCase, "vocab.txt" ) with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) lowercase__ = CustomTokenizer(_UpperCAmelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token ) lowercase__ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_UpperCAmelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" ) # 
Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = os.path.join(_UpperCAmelCase, "vocab.txt" ) with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) lowercase__ = BertTokenizerFast.from_pretrained(_UpperCAmelCase ) bert_tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = CustomTokenizerFast.from_pretrained(_UpperCAmelCase ) tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token ) lowercase__ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_UpperCAmelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast" ) lowercase__ = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''', use_fast=_UpperCAmelCase, trust_remote_code=_UpperCAmelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" ) class a__ ( unittest.TestCase ): def snake_case__ ( self ): '''simple docstring''' lowercase__ = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS]", " This is a ", "extra_id_100"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ), ["A", "BC"] ) self.assertEqual(trie.split("BCA" ), ["BC", "A"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ), ["AB", "C"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ), ["ABC", "D"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = Trie() lowercase__ = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3] ) self.assertEqual(_UpperCAmelCase, ["AB", "C"] )
668
"""simple docstring""" from __future__ import annotations def __a ( A , A ): '''simple docstring''' if partitions <= 0: raise ValueError("partitions must be a positive number!" ) if partitions > number_of_bytes: raise ValueError("partitions can not > number_of_bytes!" ) lowercase__ = number_of_bytes // partitions lowercase__ = [] for i in range(A ): lowercase__ = i * bytes_per_partition + 1 lowercase__ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
668
1
"""simple docstring""" import string from math import logaa def __a ( A , A ): '''simple docstring''' lowercase__ = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) lowercase__ = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def __a ( A , A ): '''simple docstring''' lowercase__ = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' lowercase__ = corpus_without_punctuation.split("\n" ) lowercase__ = term.lower() return (len([doc for doc in docs if term in doc] ), len(A )) def __a ( A , A , A=False ): '''simple docstring''' if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def __a ( A , A ): '''simple docstring''' return round(tf * idf , 3 )
668
"""simple docstring""" from collections import deque class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = process_name # process name lowercase__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time lowercase__ = arrival_time lowercase__ = burst_time # remaining burst time lowercase__ = 0 # total time of the process wait in ready queue lowercase__ = 0 # time from arrival time to completion time class a__ : def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ): '''simple docstring''' lowercase__ = number_of_queues # time slice of queues that round robin algorithm applied lowercase__ = time_slices # unfinished process is in this ready_queue lowercase__ = queue # current time lowercase__ = current_time # finished process is in this sequence queue lowercase__ = deque() def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return [q.burst_time for q in queue] def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of finished process while len(_UpperCAmelCase ) != 0: lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 lowercase__ = 0 # set the process's turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # set the completion time lowercase__ = self.current_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_UpperCAmelCase ) ): lowercase__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > 
time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time lowercase__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished lowercase__ = 0 # set the finish time lowercase__ = self.current_time # update the process' turnaround time because it is finished lowercase__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def snake_case__ ( self ): '''simple docstring''' for i in range(self.number_of_queues - 1 ): lowercase__ , lowercase__ = self.round_robin( self.ready_queue, self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3) lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7) lowerCAmelCase_: str = Process("P3", 0, 6_8) lowerCAmelCase_: int = Process("P4", 0, 2_4) lowerCAmelCase_: Dict = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])}) lowerCAmelCase_: Any = Process("P1", 0, 5_3) lowerCAmelCase_: Tuple = Process("P2", 0, 1_7) lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8) lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4) lowerCAmelCase_: Union[str, Any] = 3 lowerCAmelCase_: Any = [1_7, 2_5] lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa]) lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0) lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( F'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( F'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
668
1
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_: Dict = logging.get_logger(__name__) lowerCAmelCase_: Dict = "▁" lowerCAmelCase_: Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} lowerCAmelCase_: Union[str, Any] = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } lowerCAmelCase_: Union[str, Any] = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } lowerCAmelCase_: str = { "ernie-m-base": 5_1_4, "ernie-m-large": 5_1_4, } lowerCAmelCase_: Optional[int] = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class a__ ( _a ): snake_case_ = ["input_ids"] snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_INIT_CONFIGURATION snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = RESOURCE_FILES_NAMES def __init__( self, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=False, _UpperCAmelCase="utf8", _UpperCAmelCase="[UNK]", _UpperCAmelCase="[SEP]", _UpperCAmelCase="[PAD]", _UpperCAmelCase="[CLS]", _UpperCAmelCase="[MASK]", _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, vocab_file=_UpperCAmelCase, encoding=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, ) lowercase__ = do_lower_case lowercase__ = sentencepiece_model_ckpt lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowercase__ = self.load_vocab(filepath=_UpperCAmelCase ) else: lowercase__ = {self.sp_model.id_to_piece(_UpperCAmelCase ): id for id in range(self.sp_model.get_piece_size() )} lowercase__ = {v: k for k, v in self.vocab.items()} def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if text is None: return None lowercase__ = self.tokenize(_UpperCAmelCase ) lowercase__ , lowercase__ = "", [] for i, ch in enumerate(_UpperCAmelCase ): if ch in self.SP_CHAR_MAPPING: lowercase__ = self.SP_CHAR_MAPPING.get(_UpperCAmelCase ) else: lowercase__ = unicodedata.normalize("NFKC", _UpperCAmelCase ) if self.is_whitespace(_UpperCAmelCase ): continue normalized_text += ch char_mapping.extend([i] * len(_UpperCAmelCase ) ) lowercase__ , lowercase__ , lowercase__ = normalized_text, [], 0 if self.do_lower_case: lowercase__ = text.lower() for token in split_tokens: if token[:1] == "▁": lowercase__ = token[1:] lowercase__ = text[offset:].index(_UpperCAmelCase ) + offset lowercase__ = start + len(_UpperCAmelCase ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) lowercase__ = end return token_mapping @property def 
snake_case__ ( self ): '''simple docstring''' return len(self.vocab ) def snake_case__ ( self ): '''simple docstring''' return dict(self.vocab, **self.added_tokens_encoder ) def __getstate__( self ): '''simple docstring''' lowercase__ = self.__dict__.copy() lowercase__ = None return state def __setstate__( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs" ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return "".join((self.SP_CHAR_MAPPING.get(_UpperCAmelCase, _UpperCAmelCase ) for c in text) ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=64, _UpperCAmelCase=0.1 ): '''simple docstring''' if self.sp_model_kwargs.get("enable_sampling" ) is True: lowercase__ = True if self.sp_model_kwargs.get("alpha" ) is not None: lowercase__ = self.sp_model_kwargs.get("alpha" ) if self.sp_model_kwargs.get("nbest_size" ) is not None: lowercase__ = self.sp_model_kwargs.get("nbest_size" ) if not enable_sampling: lowercase__ = self.sp_model.EncodeAsPieces(_UpperCAmelCase ) else: lowercase__ = self.sp_model.SampleEncodeAsPieces(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) lowercase__ = [] for pi, piece in enumerate(_UpperCAmelCase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_UpperCAmelCase ) and pi != 0: new_pieces.append(_UpperCAmelCase ) continue else: continue lowercase__ = 0 for i, chunk in enumerate(_UpperCAmelCase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_UpperCAmelCase ) or self.is_punct(_UpperCAmelCase ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_UpperCAmelCase ) lowercase__ = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowercase__ = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowercase__ = i if len(_UpperCAmelCase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip() return out_string def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = self.convert_ids_to_tokens(_UpperCAmelCase ) lowercase__ = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip() return out_string def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.vocab.get(_UpperCAmelCase, self.vocab.get(self.unk_token ) ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' return self.reverse_vocab.get(_UpperCAmelCase, self.unk_token ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=None ): '''simple docstring''' if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def snake_case__ ( self, _UpperCAmelCase, 
_UpperCAmelCase=None, _UpperCAmelCase=False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1] def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: # [CLS] X [SEP] return (len(_UpperCAmelCase ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_UpperCAmelCase ) + 1) + [1] * (len(_UpperCAmelCase ) + 3) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if "\u4e00" <= char <= "\u9fff": return True return False def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if char in ",;:.?!~,;:。?!《》【】": return True return False def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(_UpperCAmelCase ) == 1: lowercase__ = unicodedata.category(_UpperCAmelCase ) if cat == "Zs": return True return False def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = {} with io.open(_UpperCAmelCase, "r", encoding="utf-8" ) as f: for index, line in enumerate(_UpperCAmelCase ): lowercase__ = line.rstrip("\n" ) lowercase__ = int(_UpperCAmelCase ) return token_to_idx def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ): '''simple docstring''' lowercase__ = 0 if os.path.isdir(_UpperCAmelCase ): lowercase__ = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(_UpperCAmelCase, "w", encoding="utf-8" ) as writer: for token, token_index in sorted(self.vocab.items(), key=lambda _UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) lowercase__ = token_index writer.write(token + "\n" ) index += 1 lowercase__ = os.path.join(_UpperCAmelCase, "sentencepiece.bpe.model" ) with open(_UpperCAmelCase, "wb" ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (vocab_file,)
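# A small worked example of the sequence-pair layout built above:
# [CLS] A [SEP] [SEP] B [SEP], with token_type_ids of 0 for [CLS] plus the
# first segment and 1 for everything after it. All ids here are illustrative,
# not taken from the real ErnieM vocabulary.
CLS, SEP = 0, 2  # hypothetical special-token ids
token_ids_a = [11, 12, 13]  # hypothetical ids for segment A
token_ids_b = [21, 22]      # hypothetical ids for segment B

input_ids = [CLS] + token_ids_a + [SEP] + [SEP] + token_ids_b + [SEP]
token_type_ids = [0] * (len(token_ids_a) + 1) + [1] * (len(token_ids_b) + 3)

assert len(input_ids) == len(token_type_ids) == len(token_ids_a) + len(token_ids_b) + 4
print(input_ids)       # [0, 11, 12, 13, 2, 2, 21, 22, 2]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1, 1, 1]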
668
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase_: Dict = "pt" elif is_tf_available(): lowerCAmelCase_: Dict = "tf" else: lowerCAmelCase_: str = "jax" class a__ ( _a , unittest.TestCase ): snake_case_ = ByTaTokenizer snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() lowercase__ = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case__ ( self ): '''simple docstring''' return ByTaTokenizer.from_pretrained("google/byt5-small" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ): '''simple docstring''' lowercase__ = [] for i in range(len(_UpperCAmelCase ) ): try: lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) ) lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) ) if max_length is not None and len(_UpperCAmelCase ) > max_length: lowercase__ = toks[:max_length] if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0: while len(_UpperCAmelCase ) < min_length: lowercase__ = toks + toks # toks_str = [t[1] for t in toks] lowercase__ = [t[0] for t in toks] # Ensure consistency lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase ) if " " not in output_txt and len(_UpperCAmelCase ) > 1: lowercase__ = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase ) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase ) ) if with_prefix_space: lowercase__ = " " + output_txt lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) return output_txt, output_ids def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] ) lowercase__ = tokenizer(["hi", "I went to the gym", ""] ) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = "Unicode €." 
lowercase__ = tokenizer(_UpperCAmelCase ) lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" ) lowercase__ = tokenizer("e è é ê ë" ) lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"], _UpperCAmelCase ) # decoding lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) if FRAMEWORK != "jax": lowercase__ = list(batch.input_ids.numpy()[0] ) else: lowercase__ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", _UpperCAmelCase ) self.assertIn("attention_mask", _UpperCAmelCase ) self.assertNotIn("decoder_input_ids", _UpperCAmelCase ) self.assertNotIn("decoder_attention_mask", _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = [ "Summary of the text.", "Another summary.", ] lowercase__ = tokenizer( text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase ) self.assertEqual(32, targets["input_ids"].shape[1] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.ta_base_tokenizer lowercase__ = ["A long paragraph for summarization. </s>"] lowercase__ = ["Summary of the text. 
</s>"] # fmt: off lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] ) self.assertEqual(_UpperCAmelCase, batch["labels"][0] ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) shutil.rmtree(_UpperCAmelCase ) lowercase__ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ = tempfile.mkdtemp() lowercase__ = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) lowercase__ = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file: lowercase__ = json.load(_UpperCAmelCase ) lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )] lowercase__ = added_tokens_extra_ids + [ "an_additional_special_token" ] lowercase__ = added_tokens_extra_ids + [ 
"an_additional_special_token" ] with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile: json.dump(_UpperCAmelCase, _UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )] lowercase__ = tokenizer_class.from_pretrained( _UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase ) self.assertTrue(tokenizer.decode([255] ) == "" ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] lowercase__ = 0 lowercase__ = tokenizer.convert_ids_to_tokens( _UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) for attr in attributes_list: setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase ) 
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] ) setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] ) self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
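# The expected ids in the ByT5 tests above follow from the byte-level scheme:
# each UTF-8 byte maps to byte_value + 3 (ids 0-2 are reserved for
# pad/eos/unk), and </s> is id 1. A quick self-contained check against the
# test's own expected list:
def byt5_expected_ids(text: str) -> list:
    return [b + 3 for b in text.encode("utf-8")] + [1]  # append </s>

assert byt5_expected_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]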
668
1
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _a ): snake_case_ = (PNDMScheduler,) snake_case_ = (("num_inference_steps", 50),) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = { "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**_UpperCAmelCase ) return config def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step_prk(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step_prk(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step_plms(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step_plms(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step_prk(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step_prk(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step_plms(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step_plms(_UpperCAmelCase, 
_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = 10 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step_prk(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step_plms(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample return sample def snake_case__ ( self ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ): lowercase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step_prk(_UpperCAmelCase, 0, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step_prk(_UpperCAmelCase, 1, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) lowercase__ = scheduler.step_plms(_UpperCAmelCase, 0, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step_plms(_UpperCAmelCase, 1, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def snake_case__ ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(steps_offset=1 ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps, torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ), ) def snake_case__ ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0_001, 0.001], [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase, beta_end=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t in [1, 5, 10]: 
self.check_over_forward(time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = 27 for scheduler_class in self.scheduler_classes: lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): lowercase__ = scheduler.step_prk(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample def snake_case__ ( self ): '''simple docstring''' with self.assertRaises(_UpperCAmelCase ): lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample ).prev_sample def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1_318 ) < 1E-2 assert abs(result_mean.item() - 0.2_580 ) < 1E-3 def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop(prediction_type="v_prediction" ) lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3_986 ) < 1E-2 assert abs(result_mean.item() - 0.0_878 ) < 1E-3 def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase, beta_start=0.01 ) lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0_399 ) < 1E-2 assert abs(result_mean.item() - 0.2_995 ) < 1E-3 def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase, beta_start=0.01 ) lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9_482 ) < 1E-2 assert abs(result_mean.item() - 0.2_434 ) < 1E-3
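# A minimal usage sketch for the PNDM scheduler exercised above, assuming the
# `diffusers` package; the random tensor stands in for a real denoising model,
# and the config values mirror the test defaults.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a denoising model
    sample = scheduler.step(model_output, t, sample).prev_sample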
668
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class a__ ( unittest.TestCase ): snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 ) lowercase__ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' for example in examples: lowercase__ = video_classifier(_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase, [ {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, {"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )}, ], ) @require_torch def snake_case__ ( self ): '''simple docstring''' lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" lowercase__ = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) lowercase__ = pipeline( "video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 ) lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ) lowercase__ = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(_UpperCAmelCase, decimals=4 ), [ [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ], ) @require_tf def snake_case__ ( self ): '''simple docstring''' pass
668
1
"""simple docstring""" from collections import Counter from timeit import timeit def __a ( A = "" , ): '''simple docstring''' return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2 def __a ( A = "" ): '''simple docstring''' if len(A ) == 0: return True lowercase__ = input_str.replace(" " , "" ).lower() # character_freq_dict: Stores the frequency of every character in the input string lowercase__ = {} for character in lower_case_input_str: lowercase__ = character_freq_dict.get(A , 0 ) + 1 lowercase__ = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def __a ( A = "" ): '''simple docstring''' print("\nFor string = " , A , ":" ) print( "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(A ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) print( "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(A ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) if __name__ == "__main__": lowerCAmelCase_: Dict = input( "Enter string to determine if it can be rearranged as a palindrome or not: " ).strip() benchmark(check_str) lowerCAmelCase_: Union[str, Any] = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
668
"""simple docstring""" import itertools import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __a ( ): '''simple docstring''' lowercase__ = 2 while True: if is_prime(A ): yield num num += 1 def __a ( A = 1_00_01 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , A ) ) if __name__ == "__main__": print(F'{solution() = }')
668
1
"""simple docstring""" class a__ : def __init__( self ): '''simple docstring''' lowercase__ = {} def snake_case__ ( self ): '''simple docstring''' print(self.vertex ) for i in self.vertex: print(_UpperCAmelCase, " -> ", " -> ".join([str(_UpperCAmelCase ) for j in self.vertex[i]] ) ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' if from_vertex in self.vertex: self.vertex[from_vertex].append(_UpperCAmelCase ) else: # else make a new vertex lowercase__ = [to_vertex] def snake_case__ ( self ): '''simple docstring''' lowercase__ = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = True print(_UpperCAmelCase, end=" " ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(_UpperCAmelCase, _UpperCAmelCase ) if __name__ == "__main__": lowerCAmelCase_: Union[str, Any] = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("DFS:") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
668
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a__ ( _a ): def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( _UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths} lowercase__ = Text( cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, ) def snake_case__ ( self ): '''simple docstring''' if self.streaming: lowercase__ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None self.builder.download_and_prepare( download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, ) lowercase__ = self.builder.as_dataset( split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory ) return dataset
668
1
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( _a , unittest.TestCase ): snake_case_ = MgpstrTokenizer snake_case_ = False snake_case_ = {} snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() # fmt: off lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = "tester" lowercase__ = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ), 1 ) lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase ) lowercase__ = tokenizer.tokenize(_UpperCAmelCase ) lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ), 0 ) lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def snake_case__ ( self ): '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def snake_case__ ( self ): '''simple docstring''' pass
668
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCAmelCase_: List[str] = 1_6 lowerCAmelCase_: Optional[Any] = 3_2 def __a ( A , A = 16 , A = "bert-base-cased" ): '''simple docstring''' lowercase__ = AutoTokenizer.from_pretrained(A ) lowercase__ = load_dataset("glue" , "mrpc" ) def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ = datasets.map( A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(A , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. lowercase__ = DataLoader( tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A ) lowercase__ = DataLoader( tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A ) return train_dataloader, eval_dataloader def __a ( A , A ): '''simple docstring''' lowercase__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ = config["lr"] lowercase__ = int(config["num_epochs"] ) lowercase__ = int(config["seed"] ) lowercase__ = int(config["batch_size"] ) lowercase__ = args.model_name_or_path set_seed(A ) lowercase__ , lowercase__ = get_dataloaders(A , A , A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A ) # Instantiate optimizer lowercase__ = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ = optimizer_cls(params=model.parameters() , lr=A ) if accelerator.state.deepspeed_plugin is not None: lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: lowercase__ = 1 lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ = get_linear_schedule_with_warmup( optimizer=A , num_warmup_steps=0 , num_training_steps=A , ) else: lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare( A , A , A , A , A ) # We need to keep track of how many total steps we have iterated over lowercase__ = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ = 0 # Now we train the model lowercase__ = evaluate.load("glue" , "mrpc" ) lowercase__ = 0 lowercase__ = {} for epoch in range(A , A ): model.train() for step, batch in enumerate(A ): lowercase__ = model(**A ) lowercase__ = outputs.loss lowercase__ = loss / gradient_accumulation_steps accelerator.backward(A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowercase__ = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ = model(**A ) lowercase__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowercase__ , lowercase__ = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A ) - 1: lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A , references=A , ) lowercase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , A ) lowercase__ = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: lowercase__ = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(A , A ) def __a ( ): '''simple docstring''' lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , ) parser.add_argument( "--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=A , default=3 , help="Number of train epochs." , ) lowercase__ = parser.parse_args() lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(A , A ) if __name__ == "__main__": main()
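# The core gradient-accumulation pattern used in the training loop above,
# isolated as a self-contained PyTorch sketch with dummy data:
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
batches = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(8)]
accumulation_steps = 4

optimizer.zero_grad()
for step, (x, y) in enumerate(batches):
    loss = torch.nn.functional.cross_entropy(model(x), y)
    (loss / accumulation_steps).backward()  # scale so accumulated grads average
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()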
668
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_: List[Any] = logging.get_logger(__name__) lowerCAmelCase_: int = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class a__ ( _a ): snake_case_ = "markuplm" def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout # additional properties lowercase__ = max_depth lowercase__ = max_xpath_tag_unit_embeddings lowercase__ = max_xpath_subs_unit_embeddings lowercase__ = tag_pad_id lowercase__ = subs_pad_id lowercase__ = xpath_unit_hidden_size
668
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class a__ ( _a ): snake_case_ = (IPNDMScheduler,) snake_case_ = (("num_inference_steps", 50),) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = {"num_train_timesteps": 1000} config.update(**_UpperCAmelCase ) return config def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase 
).prev_sample lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = 10 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample return sample def snake_case__ ( self ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**_UpperCAmelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ): lowercase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.timesteps[5] lowercase__ = scheduler.timesteps[6] lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def snake_case__ ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 254_0529 ) < 10
668
1
"""simple docstring""" def __a ( A , A ): '''simple docstring''' return price * (1 + tax_rate) if __name__ == "__main__": print(F'{price_plus_tax(1_0_0, 0.25) = }') print(F'{price_plus_tax(125.50, 0.05) = }')
668
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( _a , unittest.TestCase ): snake_case_ = MgpstrTokenizer snake_case_ = False snake_case_ = {} snake_case_ = False def snake_case__ ( self ): '''simple docstring''' super().setUp() # fmt: off lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) def snake_case__ ( self, **_UpperCAmelCase ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = "tester" lowercase__ = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def snake_case__ ( self ): '''simple docstring''' pass def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ), 1 ) lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase ) lowercase__ = tokenizer.tokenize(_UpperCAmelCase ) lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ), 0 ) lowercase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def snake_case__ ( self ): '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def snake_case__ ( self ): '''simple docstring''' pass
668
1
"""simple docstring""" import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_: str = get_tests_dir("fixtures/test_sentencepiece.model") lowerCAmelCase_: int = get_tests_dir("fixtures/test_sentencepiece_bpe.model") lowerCAmelCase_: List[Any] = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class a__ ( _a , unittest.TestCase ): snake_case_ = CamembertTokenizer snake_case_ = CamembertTokenizerFast snake_case_ = True snake_case_ = True def snake_case__ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ = CamembertTokenizer(_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = "<pad>" lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ), _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ), _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], "<s>NOTUSED" ) self.assertEqual(vocab_keys[1], "<pad>" ) self.assertEqual(vocab_keys[-1], "<mask>" ) self.assertEqual(len(_UpperCAmelCase ), 1004 ) def snake_case__ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 1005 ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = CamembertTokenizer(_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) lowercase__ = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) lowercase__ = "I was born in 92000, and this is falsé." lowercase__ = tokenizer.encode(_UpperCAmelCase ) lowercase__ = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) lowercase__ = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) lowercase__ = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = "I was born in 92000, and this is falsé." 
lowercase__ = tokenizer.tokenize(_UpperCAmelCase ) lowercase__ = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) lowercase__ = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(_UpperCAmelCase ) lowercase__ = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' lowercase__ = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. lowercase__ = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=_UpperCAmelCase, )
668
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
668
1
"""simple docstring""" from math import loga def __a ( A ): '''simple docstring''' if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(A , A ): raise TypeError("Input value must be a 'int' type" ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
668
"""simple docstring""" from typing import Any import numpy as np def __a ( A ): '''simple docstring''' return np.array_equal(A , matrix.conjugate().T ) def __a ( A , A ): '''simple docstring''' lowercase__ = v.conjugate().T lowercase__ = v_star.dot(A ) assert isinstance(A , np.ndarray ) return (v_star_dot.dot(A )) / (v_star.dot(A )) def __a ( ): '''simple docstring''' lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) lowercase__ = np.array([[1], [2], [3]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' print(rayleigh_quotient(A , A ) ) lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(A ), f'''{a} is not hermitian.''' assert rayleigh_quotient(A , A ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
668
1
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class a__ ( unittest.TestCase ): @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ): lowercase__ = FlaxAutoModel.from_pretrained("bert-base" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
668
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class a__ ( _a , unittest.TestCase ): snake_case_ = PriorTransformer snake_case_ = "hidden_states" @property def snake_case__ ( self ): '''simple docstring''' lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) def snake_case__ ( self ): '''simple docstring''' lowercase__ = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } lowercase__ = self.dummy_input return init_dict, inputs_dict def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ), 0 ) model.to(_UpperCAmelCase ) lowercase__ = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common() lowercase__ = self.model_class(**_UpperCAmelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ) lowercase__ = model.to(_UpperCAmelCase ) if hasattr(_UpperCAmelCase, "set_default_attn_processor" ): model.set_default_attn_processor() lowercase__ = self.get_dummy_seed_input() with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] lowercase__ = output[0, :5].flatten().cpu() print(_UpperCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) ) @slow class a__ ( unittest.TestCase ): def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = batch_size lowercase__ = embedding_dim lowercase__ = num_embeddings lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" ) model.to(_UpperCAmelCase ) lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase ) with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] assert list(sample.shape ) == [1, 768] lowercase__ = sample[0, :8].flatten().cpu() print(_UpperCAmelCase ) lowercase__ = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
668
1
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class a__ ( _a , unittest.TestCase ): snake_case_ = PriorTransformer snake_case_ = "hidden_states" @property def snake_case__ ( self ): '''simple docstring''' lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = 4 lowercase__ = 8 lowercase__ = 7 lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) @property def snake_case__ ( self ): '''simple docstring''' return (4, 8) def snake_case__ ( self ): '''simple docstring''' lowercase__ = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } lowercase__ = self.dummy_input return init_dict, inputs_dict def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ), 0 ) model.to(_UpperCAmelCase ) lowercase__ = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def snake_case__ ( self ): '''simple docstring''' lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common() lowercase__ = self.model_class(**_UpperCAmelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], _UpperCAmelCase ) def snake_case__ ( self ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ) lowercase__ = model.to(_UpperCAmelCase ) if hasattr(_UpperCAmelCase, "set_default_attn_processor" ): model.set_default_attn_processor() lowercase__ = self.get_dummy_seed_input() with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] lowercase__ = output[0, :5].flatten().cpu() print(_UpperCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) ) @slow class a__ ( unittest.TestCase ): def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ): '''simple docstring''' torch.manual_seed(_UpperCAmelCase ) lowercase__ = batch_size lowercase__ = embedding_dim lowercase__ = num_embeddings lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase ) lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def snake_case__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" ) model.to(_UpperCAmelCase ) lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase ) with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase )[0] assert list(sample.shape ) == [1, 768] lowercase__ = sample[0, :8].flatten().cpu() print(_UpperCAmelCase ) lowercase__ = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
668
"""simple docstring""" lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def __a ( A ): '''simple docstring''' if not isinstance(A , A ): lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ = len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ = b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ = b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def __a ( A ): '''simple docstring''' if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ = ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ = encoded_data[:-padding] lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ = "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
668
1
"""simple docstring""" from __future__ import annotations from fractions import Fraction def __a ( A , A ): '''simple docstring''' return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def __a ( A ): '''simple docstring''' lowercase__ = [] lowercase__ = 11 lowercase__ = int("1" + "0" * digit_len ) for num in range(A , A ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(A , A ): solutions.append(f'''{num}/{den}''' ) den += 1 num += 1 lowercase__ = 10 return solutions def __a ( A = 2 ): '''simple docstring''' lowercase__ = 1.0 for fraction in fraction_list(A ): lowercase__ = Fraction(A ) result *= frac.denominator / frac.numerator return int(A ) if __name__ == "__main__": print(solution())
668
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ): '''simple docstring''' lowercase__ = symbols(A ) lowercase__ = lambdify(A , A ) lowercase__ = lambdify(A , diff(A , A ) ) lowercase__ = starting_point while True: if diff_function(A ) != 0: lowercase__ = prev_guess - multiplicity * func(A ) / diff_function( A ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase__ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial # Find fourth Root of 5 print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}') # Find value of e print( "The root of log(y) - 1 = 0 is ", F'{newton_raphson("log(y) - 1", 2, variable="y")}', ) # Exponential Roots print( "The root of exp(x) - 1 = 0 is", F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}', ) # Find root of cos(x) print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
668
1
"""simple docstring""" from __future__ import annotations def __a ( A , A , A ): '''simple docstring''' lowercase__ = list(range(len(A ) ) ) lowercase__ = [v / w for v, w in zip(A , A )] index.sort(key=lambda A : ratio[i] , reverse=A ) lowercase__ = 0 lowercase__ = [0] * len(A ) for i in index: if weight[i] <= capacity: lowercase__ = 1 max_value += value[i] capacity -= weight[i] else: lowercase__ = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
668
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_: Union[str, Any] = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Any = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Tuple = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_: Optional[Any] = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCAmelCase_: Optional[int] = 
_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
668
1
"""simple docstring""" def __a ( A , A ): '''simple docstring''' if digit_amount > 0: return round(number - int(A ) , A ) return number - int(A ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
668
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__) class a__ ( _a ): snake_case_ = ["audio_values", "audio_mask"] def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ): '''simple docstring''' super().__init__( feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, ) lowercase__ = spectrogram_length lowercase__ = num_channels lowercase__ = patch_size lowercase__ = feature_size // self.patch_size[1] lowercase__ = n_fft lowercase__ = sampling_rate // hop_length_to_sampling_rate lowercase__ = sampling_rate lowercase__ = padding_value lowercase__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T def snake_case__ ( self, _UpperCAmelCase ): '''simple docstring''' lowercase__ = spectrogram( _UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, ) lowercase__ = log_spec[:, :-1] lowercase__ = log_spec - 20.0 lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0 return log_spec def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ = is_batched_numpy or ( isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ): lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa ) elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowercase__ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], _UpperCAmelCase ): lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowercase__ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowercase__ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa ) # convert into correct format for padding lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowercase__ = padded_audio_features * self.padding_value for i in range(len(_UpperCAmelCase ) ): lowercase__ = audio_features[i] lowercase__ = feature # return as BatchFeature if return_attention_mask: lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: lowercase__ = {"audio_values": padded_audio_features} lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase ) return encoded_inputs
668
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class a__ ( _a ): snake_case_ = 42 snake_case_ = 42 def __init__( self, _UpperCAmelCase, _UpperCAmelCase ): '''simple docstring''' super().__init__() self.register_modules(unet=_UpperCAmelCase, scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__( self, _UpperCAmelCase = 1, _UpperCAmelCase = 2000, _UpperCAmelCase = None, _UpperCAmelCase = "pil", _UpperCAmelCase = True, **_UpperCAmelCase, ): '''simple docstring''' lowercase__ = self.unet.config.sample_size lowercase__ = (batch_size, 3, img_size, img_size) lowercase__ = self.unet lowercase__ = randn_tensor(_UpperCAmelCase, generator=_UpperCAmelCase ) * self.scheduler.init_noise_sigma lowercase__ = sample.to(self.device ) self.scheduler.set_timesteps(_UpperCAmelCase ) self.scheduler.set_sigmas(_UpperCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): lowercase__ = self.unet(_UpperCAmelCase, _UpperCAmelCase ).sample lowercase__ = self.scheduler.step_correct(_UpperCAmelCase, _UpperCAmelCase, generator=_UpperCAmelCase ).prev_sample # prediction step lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase ).sample lowercase__ = self.scheduler.step_pred(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, generator=_UpperCAmelCase ) lowercase__ , lowercase__ = output.prev_sample, output.prev_sample_mean lowercase__ = sample_mean.clamp(0, 1 ) lowercase__ = sample.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": lowercase__ = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=_UpperCAmelCase )
668
"""simple docstring""" from __future__ import annotations import math def __a ( A ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def __a ( A ): '''simple docstring''' if not isinstance(A , A ): raise ValueError("n must be an integer" ) if n <= 0: raise ValueError("n must be >= 0" ) lowercase__ = [] for num in range(len(A ) ): lowercase__ = 0 while 2 * i * i <= odd_composites[num]: lowercase__ = odd_composites[num] - 2 * i * i if is_prime(A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(A ) == n: return list_nums return [] def __a ( ): '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
668
1
"""simple docstring""" def __a ( A ): '''simple docstring''' lowercase__ = len(A ) for i in range(1 , A ): lowercase__ = collection[i] lowercase__ = 0 lowercase__ = i - 1 while low <= high: lowercase__ = (low + high) // 2 if val < collection[mid]: lowercase__ = mid - 1 else: lowercase__ = mid + 1 for j in range(A , A , -1 ): lowercase__ = collection[j - 1] lowercase__ = val return collection if __name__ == "__main__": lowerCAmelCase_: str = input("Enter numbers separated by a comma:\n").strip() lowerCAmelCase_: str = [int(item) for item in user_input.split(",")] print(binary_insertion_sort(unsorted))
668
"""simple docstring""" import os import sys lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) lowerCAmelCase_: Union[str, Any] = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoConfig.from_pretrained(*A , **A ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A , **A ) @add_start_docstrings(AutoModel.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModel.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A , **A ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __a ( *A , **A ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
668
1
"""simple docstring""" from math import factorial def __a ( A , A , A ): '''simple docstring''' if successes > trials: raise ValueError("successes must be lower or equal to trials" ) if trials < 0 or successes < 0: raise ValueError("the function is defined for non-negative integers" ) if not isinstance(A , A ) or not isinstance(A , A ): raise ValueError("the function is defined for non-negative integers" ) if not 0 < prob < 1: raise ValueError("prob has to be in range of 1 - 0" ) lowercase__ = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! lowercase__ = float(factorial(A ) ) coefficient /= factorial(A ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print("Probability of 2 successes out of 4 trails") print("with probability of 0.75 is:", end=" ") print(binomial_distribution(2, 4, 0.75))
668
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class a__ ( unittest.TestCase ): @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(_UpperCAmelCase ): lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase ) @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() @slow def snake_case__ ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase ) lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ): lowercase__ = FlaxAutoModel.from_pretrained("bert-base" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex( _UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def snake_case__ ( self ): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ): lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
668
1