column                   type    value range
code                     string  lengths 87-55.2k
code_codestyle           int64   0-349
style_context            string  lengths 135-49.1k
style_context_codestyle  int64   0-349
label                    int64   0-1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
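For orientation, a minimal usage sketch of the class above as it is exposed by `transformers` (the printed ids are illustrative; exact values depend on the checkpoint's vocabulary):

from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")

# Single sequence: [CLS] tokens [SEP], all with token type id 0
enc = tokenizer("hello world")
print(enc["input_ids"])       # e.g. [101, 7592, 2088, 102]
print(enc["token_type_ids"])  # [0, 0, 0, 0]

# Sequence pair: [CLS] a [SEP] b [SEP]; the second segment gets type id 1
pair = tokenizer("hello", "world")
print(pair["token_type_ids"])  # [0, 0, 0, 1, 1]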
14
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    # Sort item indices by value/weight ratio, best first
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Take the whole item
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the fraction of the item that still fits
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
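A quick illustration of the greedy strategy above; the values, weights, and capacity are made-up example numbers:

# Ratios: 60/10 = 6, 100/20 = 5, 120/30 = 4 -> items taken in that order
value = [60, 100, 120]
weight = [10, 20, 30]

max_value, fractions = fractional_knapsack(value, weight, capacity=50)
print(max_value)  # 240.0  (items 0 and 1 fully, then 2/3 of item 2)
print(fractions)  # [1, 1, 0.6666666666666666]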
14
1
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    main()


if __name__ == "__main__":
    main()
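The tests above exercise `Accelerator.accumulate`; a minimal, self-contained sketch of that pattern on a toy linear model (assumes `accelerate` and `torch` are installed):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(64, 1), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # Inside `accumulate`, gradient sync (and the effective optimizer step)
    # only happens every `gradient_accumulation_steps` batches or at the end
    # of the dataloader; the other steps accumulate gradients locally.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()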
14
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
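To make the normalization step concrete, a sample call to `normalize_text` defined above (made-up input sentence):

print(normalize_text("Hello, World!\nThis -- is; a: TEST."))
# -> "hello world this is a test"
# punctuation is stripped, text is lowercased, and newline/double-space
# runs are collapsed to single spaces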
14
1
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next placement
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
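A short usage example for `radix_sort` above (made-up input; the list is sorted in place and returned):

nums = [170, 45, 75, 90, 802, 24, 2, 66]
print(radix_sort(nums))  # [2, 24, 45, 66, 75, 90, 170, 802]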
14
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
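The `_LazyModule` pattern above defers the heavy framework imports until an attribute is first touched. A stripped-down sketch of the same idea using module-level `__getattr__` (PEP 562); the package and attribute names here are hypothetical:

# hypothetical_pkg/__init__.py
import importlib
from typing import TYPE_CHECKING

_import_structure = {"heavy_submodule": ["HeavyClass"]}

if TYPE_CHECKING:
    from .heavy_submodule import HeavyClass  # static analyzers see the real import
else:
    def __getattr__(name):
        # Resolve the attribute lazily on first access, then cache it in globals().
        for module_name, attrs in _import_structure.items():
            if name in attrs:
                module = importlib.import_module(f".{module_name}", __name__)
                value = getattr(module, name)
                globals()[name] = value
                return value
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")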
14
1
_lowerCamelCase : Tuple = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _lowerCamelCase : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _lowerCamelCase : Tuple = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
14
1
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
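A brief usage sketch of the Fenwick tree above; `prefix(r)` sums `arr[0:r]` and `query(l, r)` sums `arr[l:r]` (example values are made up):

f = FenwickTree([1, 2, 3, 4, 5])
print(f.prefix(3))    # 6  -> 1 + 2 + 3
print(f.query(1, 4))  # 9  -> 2 + 3 + 4
f.add(2, 10)          # point update: arr[2] += 10
print(f.query(1, 4))  # 19
print(f.get_array())  # [1, 2, 13, 4, 5]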
14
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) A__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A__ = 1 if upper_limit > 0: A__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _lowerCamelCase : List[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
1
from ..utils import DummyObject, requires_backends


class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
14
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
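For intuition, a self-contained sketch of the top-k magnitude masking that the binarizers above perform, in plain PyTorch rather than the `emmental` module (the helper name is made up):

import torch

def topk_magnitude_mask(tensor: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # Binary mask keeping the `keep_ratio` fraction of largest-|w| entries.
    k = max(1, int(keep_ratio * tensor.numel()))
    threshold = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    return (tensor.abs() >= threshold).to(tensor.dtype)

weights = torch.randn(4, 4)
mask = topk_magnitude_mask(weights, keep_ratio=0.25)
pruned = weights * mask
print(int(mask.sum().item()))  # 4 of 16 entries kept (barring exact ties)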
14
1
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
14
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    # Adler-32 checksum: a is the running byte sum, b the sum of the a values
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
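A sanity check of the function above against the reference implementation in the standard library; `zlib.adler32` takes bytes, so the string is encoded first:

import zlib

text = "Wikipedia"
assert adler32(text) == zlib.adler32(text.encode("utf-8"))
print(hex(adler32(text)))  # 0x11e60398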
14
1
import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument(
        "--num_folds", type=int, default=3, help="The number of splits to perform across the dataset"
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
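For reference, a minimal demonstration of the index splits `StratifiedKFold` produces for the loop above (toy labels; requires scikit-learn):

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])
kfold = StratifiedKFold(n_splits=2)

# Each fold keeps the class ratio of `labels` in both partitions.
for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    print(f"fold {fold}: train={train_idxs.tolist()} valid={valid_idxs.tolist()}")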
14
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowerCamelCase : Tuple = 16 _lowerCamelCase : Optional[int] = 32 def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" return int(x / 2**20 ) class UpperCamelCase_ : '''simple docstring''' def __enter__( self : Tuple) ->int: '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero A__ = torch.cuda.memory_allocated() return self def __exit__( self : Optional[int] , *UpperCAmelCase__ : int) ->Dict: '''simple docstring''' gc.collect() torch.cuda.empty_cache() A__ = torch.cuda.memory_allocated() A__ = torch.cuda.max_memory_allocated() A__ = bamb(self.end - self.begin) A__ = bamb(self.peak - self.begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 16 , lowercase_ = "bert-base-cased" , lowercase_ = 320 , lowercase_ = 160 , ) -> int: """simple docstring""" A__ = AutoTokenizer.from_pretrained(lowercase_ ) A__ = load_dataset( '''glue''' , '''mrpc''' , split={'''train''': f"""train[:{n_train}]""", '''validation''': f"""validation[:{n_val}]"""} ) def tokenize_function(lowercase_ ): # max_length=None => use the model max length (it's actually the default) A__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase_ , max_length=lowercase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A__ = datasets.map( lowercase_ , batched=lowercase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowercase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowercase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(lowercase_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
A__ = DataLoader( tokenized_datasets['''train'''] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) A__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) return train_dataloader, eval_dataloader def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" A__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A__ = config['''lr'''] A__ = int(config['''num_epochs'''] ) A__ = int(config['''seed'''] ) A__ = int(config['''batch_size'''] ) A__ = args.model_name_or_path set_seed(lowercase_ ) A__ , A__ = get_dataloaders(lowercase_ , lowercase_ , lowercase_ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A__ = AutoModelForSequenceClassification.from_pretrained(lowercase_ , return_dict=lowercase_ ) # Instantiate optimizer A__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A__ = optimizer_cls(params=model.parameters() , lr=lowercase_ ) if accelerator.state.deepspeed_plugin is not None: A__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: A__ = 1 A__ = (len(lowercase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A__ = get_linear_schedule_with_warmup( optimizer=lowercase_ , num_warmup_steps=0 , num_training_steps=lowercase_ , ) else: A__ = DummyScheduler(lowercase_ , total_num_steps=lowercase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
A__ , A__ , A__ , A__ , A__ = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # We need to keep track of how many total steps we have iterated over A__ = 0 # We also need to keep track of the stating epoch so files are named properly A__ = 0 # Now we train the model A__ = {} for epoch in range(lowercase_ , lowercase_ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(lowercase_ ): A__ = model(**lowercase_ ) A__ = outputs.loss A__ = loss / gradient_accumulation_steps accelerator.backward(lowercase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) ) accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) ) accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) ) accelerator.print( '''Total Peak Memory consumed during the train (max): {}'''.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) A__ = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f: json.dump(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE ( ) -> List[str]: """simple docstring""" A__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=lowercase_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowercase_ , ) parser.add_argument( '''--output_dir''' , type=lowercase_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--peak_memory_upper_bound''' , type=lowercase_ , default=lowercase_ , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , ) parser.add_argument( '''--n_train''' , type=lowercase_ , default=320 , help='''Number of training examples to use.''' , ) parser.add_argument( '''--n_val''' , type=lowercase_ , default=160 , help='''Number of validation examples to use.''' , ) parser.add_argument( '''--num_epochs''' , type=lowercase_ , default=1 , help='''Number of train epochs.''' , ) A__ = parser.parse_args() A__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
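# The context manager above (obfuscated as UpperCamelCase_, referenced later as
# TorchTracemalloc) measures GPU memory deltas around the training loop. A
# minimal, self-contained sketch of that pattern, assuming a CUDA device is
# available:
import gc

import torch


class PeakMemoryTracker:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        # convert byte deltas to MB, matching the bytes-to-MB helper above
        self.used = int((torch.cuda.memory_allocated() - self.begin) / 2**20)
        self.peaked = int((torch.cuda.max_memory_allocated() - self.begin) / 2**20)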
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCamelCase : Any = logging.get_logger(__name__)


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = '''encoder-decoder'''
    UpperCAmelCase__ = True

    def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
        '''simple docstring'''
        super().__init__(**UpperCAmelCase__)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        A__ = kwargs.pop('''encoder''')
        A__ = encoder_config.pop('''model_type''')
        A__ = kwargs.pop('''decoder''')
        A__ = decoder_config.pop('''model_type''')

        from ..auto.configuration_auto import AutoConfig

        A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
        A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
        A__ = True

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        A__ = True
        A__ = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
        '''simple docstring'''
        A__ = copy.deepcopy(self.__dict__)
        A__ = self.encoder.to_dict()
        A__ = self.decoder.to_dict()
        A__ = self.__class__.model_type
        return output
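# Usage sketch: in transformers this class is EncoderDecoderConfig and the
# classmethod above is from_encoder_decoder_configs. The BERT defaults below
# are illustrative.
from transformers import BertConfig, EncoderDecoderConfig

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention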
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer _lowerCamelCase : Dict = logging.get_logger(__name__) _lowerCamelCase : List[str] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} _lowerCamelCase : Optional[int] = { """vocab_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""", }, """merges_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""", }, """tokenizer_file""": { """Salesforce/codegen-350M-mono""": ( """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : List[Any] = { """Salesforce/codegen-350M-mono""": 2048, } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__ = CodeGenTokenizer def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Any="<|endoftext|>" , UpperCAmelCase__ : Optional[int]="<|endoftext|>" , UpperCAmelCase__ : Tuple="<|endoftext|>" , UpperCAmelCase__ : Optional[int]=False , **UpperCAmelCase__ : Tuple , ) ->Dict: '''simple docstring''' super().__init__( UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , **UpperCAmelCase__ , ) if kwargs.pop('''add_bos_token''' , UpperCAmelCase__): A__ = kwargs.pop('''name_or_path''' , '''''') raise ValueError( '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.''' '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n''' f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.''' ''' so that the fast tokenizer works correctly.''') A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase__) != add_prefix_space: A__ = getattr(UpperCAmelCase__ , pre_tok_state.pop('''type''')) A__ = add_prefix_space A__ = pre_tok_class(**UpperCAmelCase__) A__ = add_prefix_space def SCREAMING_SNAKE_CASE ( self : Tuple , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int]) ->BatchEncoding: '''simple docstring''' A__ = kwargs.get('''is_split_into_words''' , UpperCAmelCase__) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict) ->BatchEncoding: '''simple docstring''' A__ = kwargs.get('''is_split_into_words''' , UpperCAmelCase__) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' A__ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__) return tuple(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[List[str]] = None , **UpperCAmelCase__ : str , ) ->str: '''simple docstring''' A__ = super().decode( token_ids=UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , **UpperCAmelCase__ , ) if truncate_before_pattern is not None and len(UpperCAmelCase__) > 0: A__ = self.truncate(UpperCAmelCase__ , UpperCAmelCase__) return decoded_text def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]) ->Tuple: '''simple docstring''' def find_re(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]): A__ = pattern.search(UpperCAmelCase__ , UpperCAmelCase__) return m.start() if m else -1 A__ = [re.compile(UpperCAmelCase__ , re.MULTILINE) for pattern in truncate_before_pattern] A__ = list(re.finditer('''^print''' , UpperCAmelCase__ , re.MULTILINE)) if len(UpperCAmelCase__) > 1: A__ = completion[: prints[1].start()] A__ = list(re.finditer('''^def''' , UpperCAmelCase__ , re.MULTILINE)) if len(UpperCAmelCase__) > 1: A__ = completion[: defs[1].start()] A__ = 0 A__ = [ pos for pos in [find_re(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) for terminal in terminals] if pos != -1 ] if len(UpperCAmelCase__) > 0: return completion[: min(UpperCAmelCase__)] else: return completion
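# Usage sketch for the pattern-based truncation above; in transformers this is
# CodeGenTokenizerFast.decode(..., truncate_before_pattern=[...]). The sample
# text and pattern are illustrative: decoding stops before the first top-level
# `print` statement after the function body.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok("def hello():\n    return 1\n\nprint(hello())").input_ids
print(tok.decode(ids, truncate_before_pattern=[r"^print"]))  # keeps only the function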
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
    """simple docstring"""
    A__ = [0] * len(lowercase_ )
    A__ = []
    A__ = [1] * len(lowercase_ )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(lowercase_ ) ):
        if indegree[i] == 0:
            queue.append(lowercase_ )

    while queue:
        A__ = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                A__ = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(lowercase_ )

    print(max(lowercase_ ) )


# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
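# For the adjacency list above the printed answer is 5: every vertex starts
# with distance 1, so the maximum counts the vertices on a longest path such
# as 0 -> 3 -> 5 -> 6 -> 7. (The final call site preserves the original
# function name, `longest_distance`, that the obfuscated `def` corresponds to.)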
import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" return np.dot(lowercase_ , lowercase_ ) class UpperCamelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] , *, UpperCAmelCase__ : float = np.inf , UpperCAmelCase__ : str = "linear" , UpperCAmelCase__ : float = 0.0 , ) ->None: '''simple docstring''' A__ = regularization A__ = gamma if kernel == "linear": A__ = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''') if not isinstance(self.gamma , (float, int)): raise ValueError('''gamma must be float or int''') if not self.gamma > 0: raise ValueError('''gamma must be > 0''') A__ = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: A__ = f"""Unknown kernel: {kernel}""" raise ValueError(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : ndarray , UpperCAmelCase__ : ndarray) ->float: '''simple docstring''' return np.dot(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : ndarray , UpperCAmelCase__ : ndarray) ->float: '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : list[ndarray] , UpperCAmelCase__ : ndarray) ->None: '''simple docstring''' A__ = observations A__ = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((A__) , ) = np.shape(UpperCAmelCase__) def to_minimize(UpperCAmelCase__ : ndarray) -> float: A__ = 0 ((A__) , ) = np.shape(UpperCAmelCase__) for i in range(UpperCAmelCase__): for j in range(UpperCAmelCase__): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase__) A__ = LinearConstraint(UpperCAmelCase__ , 0 , 0) A__ = Bounds(0 , self.regularization) A__ = minimize( UpperCAmelCase__ , np.ones(UpperCAmelCase__) , bounds=UpperCAmelCase__ , constraints=[ly_contraint]).x A__ = l_star # calculating mean offset of separation plane to points A__ = 0 for i in range(UpperCAmelCase__): for j in range(UpperCAmelCase__): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) A__ = s / n def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : ndarray) ->int: '''simple docstring''' A__ = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase__) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
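# Usage sketch for the Wolfe-dual SVM above. Class and method names are
# obfuscated in this copy; in the original algorithms file they appear as
# SVC with fit(observations, classes) and predict(observation), which is what
# this sketch assumes:
#
#     xs = [np.asarray(p) for p in ((0.0, 1.0), (0.0, 2.0), (1.0, 1.0), (1.0, 2.0))]
#     ys = np.asarray((1, 1, -1, -1))
#     svm = SVC(kernel="linear")
#     svm.fit(observations=xs, classes=ys)
#     svm.predict(np.asarray((0.0, 1.5)))  # -> 1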
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' UpperCAmelCase__ = None UpperCAmelCase__ = "utf-8" UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = True # deprecated UpperCAmelCase__ = None # deprecated UpperCAmelCase__ = 10 << 20 # 10MB UpperCAmelCase__ = None class UpperCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' UpperCAmelCase__ = JsonConfig def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''') A__ = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''') if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''') return datasets.DatasetInfo(features=self.config.features) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") A__ = dl_manager.download_and_extract(self.config.data_files) if isinstance(UpperCAmelCase__ , (str, list, tuple)): A__ = data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})] A__ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files})) return splits def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema) return pa_table def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) # We keep only the field we are interested in A__ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase__ , 
(list, tuple)): A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} else: A__ = dataset A__ = pa.Table.from_pydict(UpperCAmelCase__) yield file_idx, self._cast_table(UpperCAmelCase__) # If the file has one json object per line else: with open(UpperCAmelCase__ , '''rb''') as f: A__ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A__ = max(self.config.chunksize // 32 , 16 << 10) A__ = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A__ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase__) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''') try: while True: try: A__ = paj.read_json( io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase__ , pa.ArrowInvalid) and "straddling" not in str(UpperCAmelCase__) or block_size > len(UpperCAmelCase__) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON try: A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} A__ = pa.Table.from_pydict(UpperCAmelCase__) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(UpperCAmelCase__) break else: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__) batch_idx += 1
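# The builder above is what backs load_dataset("json", ...). Two usage
# sketches (file names are placeholders): a JSON-Lines file read in chunks,
# and a single JSON object whose records live under a named field.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("json", data_files="data.jsonl", split="train")
#     ds = load_dataset("json", data_files="nested.json", field="data", split="train")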
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration _lowerCamelCase : Any = 500000 _lowerCamelCase , _lowerCamelCase : List[Any] = os.path.split(__file__) _lowerCamelCase : Tuple = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def SCREAMING_SNAKE_CASE ( lowercase_ , **lowercase_ ) -> str: """simple docstring""" A__ = dataset.map(**lowercase_ ) @get_duration def SCREAMING_SNAKE_CASE ( lowercase_ , **lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = dataset.filter(**lowercase_ ) def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" A__ = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: A__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) A__ = generate_example_dataset( os.path.join(lowercase_ , '''dataset.arrow''' ) , lowercase_ , num_examples=lowercase_ ) A__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowercase_ ) def tokenize(lowercase_ ): return tokenizer(examples['''text'''] ) A__ = map(lowercase_ ) A__ = map(lowercase_ , batched=lowercase_ ) A__ = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type='''numpy''' ): A__ = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type='''pandas''' ): A__ = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type='''torch''' , columns='''numbers''' ): A__ = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ): A__ = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ ) A__ = map(lowercase_ , function=lowercase_ , batched=lowercase_ ) A__ = filter(lowercase_ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(lowercase_ , '''wb''' ) as f: f.write(json.dumps(lowercase_ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
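# `get_duration` is imported from a local utils module that is not part of
# this file; a minimal sketch of what such a decorator presumably looks like
# (an assumption, not the benchmark suite's actual implementation):
import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # seconds elapsed, recorded by the caller

    return wrapper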
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""


@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
        '''simple docstring'''
        return AutoConfig.from_pretrained(UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.num_hidden_layers , 1)

    def SCREAMING_SNAKE_CASE ( self : int) ->Any:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)

    def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , 1)

    def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
        '''simple docstring'''
        with self.assertRaises(UpperCAmelCase__):
            create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
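# The helper under test builds a smaller "student" seq2seq model by copying a
# subset of the teacher's layers; `e` and `d` are the target encoder/decoder
# layer counts, and passing None for one of them appears to keep the teacher's
# count for that side (which the asymmetric assertions above rely on).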
from math import ceil


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
    """simple docstring"""
    A__ = list(range(0 , lowercase_ ) )
    A__ = [item for sublist in list(device_map.values() ) for item in sublist]

    # Duplicate check
    A__ = []
    for i in device_map_blocks:
        if device_map_blocks.count(lowercase_ ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(lowercase_ )
    # Missing blocks
    A__ = [i for i in blocks if i not in device_map_blocks]
    A__ = [i for i in device_map_blocks if i not in blocks]

    if len(lowercase_ ) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(lowercase_ ) )
    if len(lowercase_ ) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(lowercase_ ) )
    if len(lowercase_ ) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(lowercase_ ) )


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[str]:
    """simple docstring"""
    A__ = list(range(lowercase_ ) )
    A__ = int(ceil(n_layers / len(lowercase_ ) ) )
    A__ = [layers[i : i + n_blocks] for i in range(0 , lowercase_ , lowercase_ )]

    return dict(zip(lowercase_ , lowercase_ ) )
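# Worked example for the second helper above (get_device_map in transformers):
# splitting 12 attention blocks across 4 devices gives ceil(12 / 4) = 3
# consecutive layers per device:
#
#     get_device_map(12, list(range(4)))
#     # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}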
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size['''shortest_edge'''] * h / w) A__ = self.size['''shortest_edge'''] elif w > h: A__ = self.size['''shortest_edge'''] A__ = int(self.size['''shortest_edge'''] * w / h) else: A__ = self.size['''shortest_edge'''] A__ = self.size['''shortest_edge'''] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0] A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''')) 
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = 
image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''image_id''': 39_769, '''annotations''': target} # encode them A__ = DeformableDetrImageProcessor() A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''') # encode them A__ = DeformableDetrImageProcessor(format='''coco_panoptic''') A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) 
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify masks A__ = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
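# Inference-side usage sketch for the image processor exercised above; the
# checkpoint and file name are illustrative.
#
#     from PIL import Image
#     from transformers import DeformableDetrImageProcessor
#
#     processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
#     inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # (1, 3, height, width) after resize + pad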
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_lowerCamelCase : Tuple = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")


@total_ordering
@dataclass
class UpperCamelCase_ :
    '''simple docstring'''

    UpperCAmelCase__ = 42
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None

    def SCREAMING_SNAKE_CASE ( self : Any) ->Any:
        '''simple docstring'''
        A__ , A__ , A__ = _str_to_version_tuple(self.version_str)

    def __repr__( self : Dict) ->List[str]:
        '''simple docstring'''
        return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""

    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
        '''simple docstring'''
        return self.major, self.minor, self.patch

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
        '''simple docstring'''
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
            return Version(UpperCAmelCase__)
        elif isinstance(UpperCAmelCase__ , UpperCAmelCase__):
            return other
        raise TypeError(f"""{other} (type {type(UpperCAmelCase__)}) cannot be compared to version.""")

    def __eq__( self : Tuple , UpperCAmelCase__ : int) ->Union[str, Any]:
        '''simple docstring'''
        try:
            A__ = self._validate_operand(UpperCAmelCase__)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__( self : List[Any] , UpperCAmelCase__ : Union[str, Any]) ->List[str]:
        '''simple docstring'''
        A__ = self._validate_operand(UpperCAmelCase__)
        return self.tuple < other.tuple

    def __hash__( self : int) ->Any:
        '''simple docstring'''
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Optional[int] , UpperCAmelCase__ : int) ->List[str]:
        '''simple docstring'''
        A__ = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def SCREAMING_SNAKE_CASE ( self : Dict) ->str:
        '''simple docstring'''
        return self.version_str


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
    """simple docstring"""
    A__ = _VERSION_REG.match(lowercase_ )
    if not res:
        raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
    return tuple(int(lowercase_ ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
    """simple docstring"""
    return ".".join(str(lowercase_ ) for v in version_tuple )
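# Behavior sketch for the version helper above (datasets.utils.version.Version;
# the dataclass fields and parser names are mangled in this copy):
#
#     Version("1.0.0").tuple        # -> (1, 0, 0)
#     Version("1.0.0") < "2.0.0"    # True: comparisons coerce strings first
#     Version("1.0")                # ValueError: format must be x.y.z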
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np


_lowerCamelCase : str = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
_lowerCamelCase : Tuple = typing.Union[np.floataa, int, float]  # noqa: UP007


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) )


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut:
    """simple docstring"""
    return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2)


if __name__ == "__main__":

    def SCREAMING_SNAKE_CASE ( ) -> None:
        """simple docstring"""
        from timeit import timeit

        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' ,
                number=10_000 ,
                globals=globals() ,
            ) )
        print('''With Numpy''' )
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' ,
                number=10_000 ,
                globals=globals() ,
            ) )

    benchmark()
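# Quick sanity check for the intended behavior of both implementations (the
# timeit strings above preserve the original names, euclidean_distance and
# euclidean_distance_no_np): a 3-4-5 triangle gives
# euclidean_distance([0, 0], [3, 4]) == 5.0 for either variant.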
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
    """simple docstring"""
    A__ = ''''''
    for word_or_phrase in separated:
        if not isinstance(lowercase_ , lowercase_ ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(lowercase_ )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
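# Behavior sketch, assuming the argument order join(separator, separated) that
# the trailing strip(separator) implies:
#
#     join("", ["a", "b", "c"])                 # -> "abc"
#     join("-", ["apple", "banana", "cherry"])  # -> "apple-banana-cherry"
#     join(" ", ["You", "are", "amazing!"])     # -> "You are amazing!"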
from ...processing_utils import ProcessorMixin class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''SpeechT5FeatureExtractor''' UpperCAmelCase__ = '''SpeechT5Tokenizer''' def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]: '''simple docstring''' super().__init__(UpperCAmelCase__ , UpperCAmelCase__) def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]: '''simple docstring''' A__ = kwargs.pop('''audio''' , UpperCAmelCase__) A__ = kwargs.pop('''text''' , UpperCAmelCase__) A__ = kwargs.pop('''text_target''' , UpperCAmelCase__) A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__) A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''') if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''') if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''') if audio is not None: A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__) elif text is not None: A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__) else: A__ = None if audio_target is not None: A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_values'''] elif text_target is not None: A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_ids'''] else: A__ = None if inputs is None: return targets if targets is not None: A__ = labels A__ = targets.get('''attention_mask''') if decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]: '''simple docstring''' A__ = kwargs.pop('''input_values''' , UpperCAmelCase__) A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__) A__ = kwargs.pop('''labels''' , UpperCAmelCase__) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''') if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''') if input_values is not None: A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__) elif input_ids is not None: A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__) else: A__ = None if labels is not None: if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]): A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_ids'''] else: A__ = self.feature_extractor.feature_size A__ = self.feature_extractor.num_mel_bins A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__) A__ = feature_size_hack A__ = targets['''input_values'''] else: A__ = None if inputs is None: return targets if targets is not None: A__ = labels A__ = targets.get('''attention_mask''') if 
decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
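# Usage sketch for the processor above (SpeechT5Processor in transformers);
# the checkpoint name is illustrative.
#
#     from transformers import SpeechT5Processor
#
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello, world.", return_tensors="pt")
#     # for TTS training, pair text inputs with audio targets:
#     # batch = processor(text=..., audio_target=..., sampling_rate=16000, return_tensors="pt")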
import colorsys

from PIL import Image  # type: ignore


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> float:
    """simple docstring"""
    A__ = x
    A__ = y
    for step in range(lowercase_ ):  # noqa: B007
        A__ = a * a - b * b + x
        A__ = 2 * a * b + y
        A__ = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) )


def SCREAMING_SNAKE_CASE (
    lowercase_ = 800 ,
    lowercase_ = 600 ,
    lowercase_ = -0.6 ,
    lowercase_ = 0 ,
    lowercase_ = 3.2 ,
    lowercase_ = 50 ,
    lowercase_ = True ,
) -> Image.Image:
    """simple docstring"""
    A__ = Image.new('''RGB''' , (image_width, image_height) )
    A__ = img.load()

    # loop through the image-coordinates
    for image_x in range(lowercase_ ):
        for image_y in range(lowercase_ ):
            # determine the figure-coordinates based on the image-coordinates
            A__ = figure_width / image_width * image_height
            A__ = figure_center_x + (image_x / image_width - 0.5) * figure_width
            A__ = figure_center_y + (image_y / image_height - 0.5) * figure_height

            A__ = get_distance(lowercase_ , lowercase_ , lowercase_ )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                A__ = get_color_coded_rgb(lowercase_ )
            else:
                A__ = get_black_and_white_rgb(lowercase_ )

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    _lowerCamelCase : List[str] = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
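# get_distance returns a value in [0, 1]: points that never diverge within
# max_step iterations (for example x = 0, y = 0, inside the set) map to 1.0
# and are therefore drawn black by both coloring functions, while quickly
# diverging points map toward 0.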
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Tuple = logging.get_logger(__name__) _lowerCamelCase : str = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git_vision_model''' def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = hidden_size A__ = intermediate_size A__ = num_hidden_layers A__ = num_attention_heads A__ = num_channels A__ = patch_size A__ = image_size A__ = initializer_range A__ = attention_dropout A__ = layer_norm_eps A__ = hidden_act @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the vision config dict if we are loading from GITConfig if config_dict.get('''model_type''') == "git": A__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git''' def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any: '''simple docstring''' super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__) if vision_config is None: A__ = {} logger.info('''vision_config is None. 
initializing the GitVisionConfig with default values.''') A__ = GitVisionConfig(**UpperCAmelCase__) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = tie_word_embeddings A__ = num_image_with_embedding A__ = bos_token_id A__ = eos_token_id def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.vision_config.to_dict() A__ = self.__class__.model_type return output
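# Usage sketch (GitConfig / GitVisionConfig in transformers):
from transformers import GitConfig, GitVisionConfig

config = GitConfig()  # falls back to a default GitVisionConfig
custom = GitConfig(vision_config=GitVisionConfig(image_size=384).to_dict())
assert custom.vision_config.image_size == 384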
14
1
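The two renamed classes in the row above mirror the GitVisionConfig/GitConfig pair from transformers: the composite config nests a vision config and re-serializes it in its to_dict() override. A minimal hedged usage sketch, assuming the un-obfuscated upstream names (GitConfig, GitVisionConfig) that this sample's classes correspond to:

# Hedged sketch: round-tripping a nested vision config through the composite
# config, as the to_dict() override above does. Class names are the assumed
# upstream transformers equivalents of the renamed classes in this sample.
from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
config = GitConfig(vision_config=vision_config.to_dict(), vocab_size=30_522)

serialized = config.to_dict()
assert serialized["model_type"] == "git"
assert serialized["vision_config"]["hidden_size"] == 768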
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _lowerCamelCase : List[Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(lowercase_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase_ ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''pixel_values'''] def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : Tuple , ) ->None: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = size if size is not None else {'''shortest_edge''': 224} A__ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__) A__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} A__ = get_size_dict(UpperCAmelCase__ , param_name='''crop_size''') A__ = do_resize A__ = size A__ = do_center_crop A__ = crop_size A__ = resample A__ = do_rescale A__ = rescale_factor A__ = do_normalize A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Tuple , ) ->np.ndarray: '''simple docstring''' A__ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__) if "shortest_edge" in size: A__ = get_resize_output_image_size(UpperCAmelCase__ , size['''shortest_edge'''] , default_to_square=UpperCAmelCase__) elif "height" in size and "width" in size: A__ = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""") return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ) ->np.ndarray: '''simple docstring''' A__ = get_size_dict(UpperCAmelCase__) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""") return center_crop(UpperCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Union[str, Any]: '''simple docstring''' return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] , ) ->np.ndarray: '''simple docstring''' return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) ->np.ndarray: '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') # All transformations expect numpy arrays. 
A__ = to_numpy_array(UpperCAmelCase__) if do_resize: A__ = self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__) if do_center_crop: A__ = self.center_crop(UpperCAmelCase__ , size=UpperCAmelCase__) if do_rescale: A__ = self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__) if do_normalize: A__ = self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__) A__ = to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__) return image def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ) ->PIL.Image.Image: '''simple docstring''' A__ = do_resize if do_resize is not None else self.do_resize A__ = resample if resample is not None else self.resample A__ = do_center_crop if do_center_crop is not None else self.do_center_crop A__ = do_rescale if do_rescale is not None else self.do_rescale A__ = rescale_factor if rescale_factor is not None else self.rescale_factor A__ = do_normalize if do_normalize is not None else self.do_normalize A__ = image_mean if image_mean is not None else self.image_mean A__ = image_std if image_std is not None else self.image_std A__ = size if size is not None else self.size A__ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__) A__ = crop_size if crop_size is not None else self.crop_size A__ = get_size_dict(UpperCAmelCase__ , param_name='''crop_size''') if not valid_images(UpperCAmelCase__): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') A__ = make_batched(UpperCAmelCase__) A__ = [ [ self._preprocess_image( image=UpperCAmelCase__ , do_resize=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , do_center_crop=UpperCAmelCase__ , crop_size=UpperCAmelCase__ , do_rescale=UpperCAmelCase__ , rescale_factor=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , ) for img in video ] for video in videos ] A__ = {'''pixel_values''': videos} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__)
14
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown by Google Scholar for a lookup query."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
14
1
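The make_batched helper in the row above normalizes a single frame, a list of frames, or a batch of videos into List[List[image]]. A self-contained sketch of that normalization logic (simplified: an "image" here is just a numpy array, standing in for the library's is_valid_image check):

import numpy as np

def make_batched(videos):
    # batch of videos: list/tuple of lists of frames -> returned unchanged
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and isinstance(videos[0][0], np.ndarray):
        return videos
    # single video: a list of frames -> wrapped into a one-element batch
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], np.ndarray):
        return [videos]
    # single frame -> wrapped twice
    if isinstance(videos, np.ndarray):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")

frame = np.zeros((3, 224, 224))
assert len(make_batched(frame)) == 1              # [[frame]]
assert len(make_batched([frame, frame])[0]) == 2  # one video, two frames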
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.asseretIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , 
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
14
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" A__ = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors A__ = load_file(lowercase_ ) A__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A__ = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) A__ = pipeline.text_encoder else: A__ = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) A__ = pipeline.unet # find the target layer A__ = layer_infos.pop(0 ) while len(lowercase_ ) > -1: try: A__ = curr_layer.__getattr__(lowercase_ ) if len(lowercase_ ) > 0: A__ = layer_infos.pop(0 ) elif len(lowercase_ ) == 0: break except Exception: if len(lowercase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: A__ = layer_infos.pop(0 ) A__ = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(lowercase_ ) else: pair_keys.append(lowercase_ ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 ) else: A__ = state_dict[pair_keys[0]].to(torch.floataa ) A__ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ) # update visited list for item in pair_keys: visited.append(lowercase_ ) return pipeline if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") _lowerCamelCase : Tuple = parser.parse_args() _lowerCamelCase : List[Any] = args.base_model_path _lowerCamelCase : Optional[int] = args.checkpoint_path _lowerCamelCase : Dict = args.dump_path _lowerCamelCase : Optional[Any] = args.lora_prefix_unet _lowerCamelCase : Optional[int] = args.lora_prefix_text_encoder _lowerCamelCase : List[Any] = args.alpha _lowerCamelCase : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) _lowerCamelCase : Tuple = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
14
1
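The LoRA conversion script in this row folds adapter weights directly into the base layer via W = W0 + alpha * (up @ down). A small hedged sketch of just that update rule on dummy tensors (shapes chosen only for illustration):

import torch

out_features, in_features, rank = 8, 8, 4
alpha = 0.75

W0 = torch.randn(out_features, in_features)  # base layer weight
lora_down = torch.randn(rank, in_features)   # "lora_down" entry in the state dict
lora_up = torch.randn(out_features, rank)    # "lora_up" entry in the state dict

# The merged weight the script computes for non-convolutional layers:
W = W0 + alpha * torch.mm(lora_up, lora_down)
assert W.shape == W0.shape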
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Optional[int] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : str = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
import os import pytest from transformers.dynamic_module_utils import get_imports _lowerCamelCase : Any = """ import os """ _lowerCamelCase : Optional[int] = """ def foo(): import os return False """ _lowerCamelCase : List[Any] = """ def foo(): def bar(): if True: import os return False return bar() """ _lowerCamelCase : List[Any] = """ import os try: import bar except ImportError: raise ValueError() """ _lowerCamelCase : Union[str, Any] = """ import os def foo(): try: import bar except ImportError: raise ValueError() """ _lowerCamelCase : List[Any] = """ import os try: import bar except (ImportError, AttributeError): raise ValueError() """ _lowerCamelCase : List[Any] = """ import os try: import bar except ImportError as e: raise ValueError() """ _lowerCamelCase : str = """ import os try: import bar except: raise ValueError() """ _lowerCamelCase : Optional[Any] = """ import os try: import bar import baz except ImportError: raise ValueError() """ _lowerCamelCase : Any = """ import os try: import bar import baz except ImportError: x = 1 raise ValueError() """ _lowerCamelCase : Dict = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" A__ = os.path.join(lowercase_ , '''test_file.py''' ) with open(lowercase_ , '''w''' ) as _tmp_file: _tmp_file.write(lowercase_ ) A__ = get_imports(lowercase_ ) assert parsed_imports == ["os"]
14
1
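The XLNet __init__ above relies on _LazyModule so heavy backends load only on attribute access, and the accompanying test checks that get_imports ignores imports guarded by try/except ImportError. A simplified, hedged sketch of the lazy-module idea (not the real transformers implementation):

import importlib

class LazyModule:
    """Resolve attributes to submodule symbols on first access."""

    def __init__(self, name: str, import_structure: dict):
        self._name = name
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self._name!r} has no attribute {attr!r}")

# e.g. LazyModule("transformers.models.xlnet", {"configuration_xlnet": ["XLNetConfig"]})
# would import configuration_xlnet only when .XLNetConfig is first touched.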
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DiTPipeline UpperCAmelCase__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } UpperCAmelCase__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str: '''simple docstring''' torch.manual_seed(0) A__ = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase__ , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=UpperCAmelCase__ , ) A__ = AutoencoderKL() A__ = DDIMScheduler() A__ = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int=0) ->Dict: '''simple docstring''' if str(UpperCAmelCase__).startswith('''mps'''): A__ = torch.manual_seed(UpperCAmelCase__) else: A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(UpperCAmelCase__) A__ = { '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = '''cpu''' A__ = self.get_dummy_components() A__ = self.pipeline_class(**UpperCAmelCase__) pipe.to(UpperCAmelCase__) pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = pipe(**UpperCAmelCase__).images A__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3)) A__ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457]) A__ = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(UpperCAmelCase__ , 1e-3) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase__ , expected_max_diff=1e-3) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE ( self : int) ->Dict: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = torch.manual_seed(0) A__ = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''') pipe.to('''cuda''') A__ = ['''vase''', 
'''umbrella''', '''white shark''', '''white wolf'''] A__ = pipe.get_label_ids(UpperCAmelCase__) A__ = pipe(UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=40 , output_type='''np''').images for word, image in zip(UpperCAmelCase__ , UpperCAmelCase__): A__ = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""") assert np.abs((expected_image - image).max()) < 1e-2 def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]: '''simple docstring''' A__ = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''') A__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to('''cuda''') A__ = ['''vase''', '''umbrella'''] A__ = pipe.get_label_ids(UpperCAmelCase__) A__ = torch.manual_seed(0) A__ = pipe(UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=25 , output_type='''np''').images for word, image in zip(UpperCAmelCase__ , UpperCAmelCase__): A__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' f"""/dit/{word}_512.npy""") assert np.abs((expected_image - image).max()) < 1e-1
14
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
14
1
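A quick spot-check of the NOR truth table implemented above (the gate is re-declared here so the snippet runs standalone):

def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)

assert [nor_gate(a, b) for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]] == [1, 0, 0, 0]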
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]: '''simple docstring''' A__ = inspect.getfile(accelerate.test_utils) A__ = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py''']) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 A__ = test_metrics @require_cpu def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1) @require_cpu def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' debug_launcher(self.test_metrics.main) @require_single_gpu def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: '''simple docstring''' self.test_metrics.main() @require_multi_gpu def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""") A__ = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase__ , env=os.environ.copy())
14
import os import sys import unittest _lowerCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _lowerCamelCase : Any = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") _lowerCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: '''simple docstring''' A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = {'''BertModelTest''': '''BertModelTester'''} A__ = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': 
['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
14
1
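Both tests in this row are repository-introspection checks. As a hedged usage sketch, the mapping helper behaves like this when pointed at the BERT test file (assuming the transformers repo's utils/get_test_info.py is importable, mirroring the sys.path setup in the test):

import sys

sys.path.append("utils")  # assumed repo layout, mirroring the test's sys.path setup
from get_test_info import get_test_to_tester_mapping

mapping = get_test_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
print(mapping)  # the test above expects {"BertModelTest": "BertModelTester"}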
import os import pytest from transformers.dynamic_module_utils import get_imports _lowerCamelCase : Any = """ import os """ _lowerCamelCase : Optional[int] = """ def foo(): import os return False """ _lowerCamelCase : List[Any] = """ def foo(): def bar(): if True: import os return False return bar() """ _lowerCamelCase : List[Any] = """ import os try: import bar except ImportError: raise ValueError() """ _lowerCamelCase : Union[str, Any] = """ import os def foo(): try: import bar except ImportError: raise ValueError() """ _lowerCamelCase : List[Any] = """ import os try: import bar except (ImportError, AttributeError): raise ValueError() """ _lowerCamelCase : List[Any] = """ import os try: import bar except ImportError as e: raise ValueError() """ _lowerCamelCase : str = """ import os try: import bar except: raise ValueError() """ _lowerCamelCase : Optional[Any] = """ import os try: import bar import baz except ImportError: raise ValueError() """ _lowerCamelCase : Any = """ import os try: import bar import baz except ImportError: x = 1 raise ValueError() """ _lowerCamelCase : Dict = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" A__ = os.path.join(lowercase_ , '''test_file.py''' ) with open(lowercase_ , '''w''' ) as _tmp_file: _tmp_file.write(lowercase_ ) A__ = get_imports(lowercase_ ) assert parsed_imports == ["os"]
14
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.asseretIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , 
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
14
1
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) A__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A__ = 1 if upper_limit > 0: A__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _lowerCamelCase : List[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
from __future__ import annotations


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple[float, list[float]]:
    """simple docstring"""
    A__ = list(range(len(lowercase_ ) ) )
    A__ = [v / w for v, w in zip(lowercase_ , lowercase_ )]
    index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ )

    A__ = 0
    A__ = [0] * len(lowercase_ )
    for i in index:
        if weight[i] <= capacity:
            A__ = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            A__ = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
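# Worked example of the greedy rule above (hedged sketch with readable names,
# not the file's own API): sort by value/weight ratio, take items whole until
# the remaining capacity forces a fractional take.
def _fractional_knapsack(value, weight, capacity):
    order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(value)
    for i in order:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


# Classic instance: items (60, 10), (100, 20), (120, 30) with capacity 50.
# The third item is taken fractionally (2/3 of it), for a total value of 240.
_best, _fracs = _fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert _best == 240.0 and _fracs[0] == _fracs[1] == 1.0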
14
1
import shutil
import tempfile
import unittest

from transformers import (
    SPIECE_UNDERLINE,
    AddedToken,
    BatchEncoding,
    NllbTokenizer,
    NllbTokenizerFast,
    is_torch_available,
)
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


_lowerCamelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

_lowerCamelCase : Tuple = 256047
_lowerCamelCase : int = 256145


@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
    '''simple docstring'''

    UpperCAmelCase__ = NllbTokenizer
    UpperCAmelCase__ = NllbTokenizerFast
    UpperCAmelCase__ = True
    UpperCAmelCase__ = True
    UpperCAmelCase__ = {}

    def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        A__ = NllbTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__)
        tokenizer.save_pretrained(self.tmpdirname)

    def SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
        '''simple docstring'''
        A__ = NllbTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__)

        A__ = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )

        A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__)
        self.assertListEqual(
            UpperCAmelCase__ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )

        A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__)
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
        '''simple docstring'''
        A__ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                A__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__)
                A__ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__)

                A__ = tempfile.mkdtemp()

                A__ = tokenizer_r.save_pretrained(UpperCAmelCase__)
                A__ = tokenizer_p.save_pretrained(UpperCAmelCase__)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
                A__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
                self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__)

                # Checks everything loads correctly in the same way
                A__ = tokenizer_r.from_pretrained(UpperCAmelCase__)
                A__ = tokenizer_p.from_pretrained(UpperCAmelCase__)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__))

                shutil.rmtree(UpperCAmelCase__)

                # Save tokenizer rust, legacy_format=True
                A__ = tempfile.mkdtemp()

                A__ = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__)
                A__ = tokenizer_p.save_pretrained(UpperCAmelCase__)

                # Checks it save with the same files
                self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__)

                # Checks everything loads correctly in the same way
                A__ = tokenizer_r.from_pretrained(UpperCAmelCase__)
                A__ = tokenizer_p.from_pretrained(UpperCAmelCase__)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__))

                shutil.rmtree(UpperCAmelCase__)

                # Save tokenizer rust, legacy_format=False
                A__ = tempfile.mkdtemp()

                A__ = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__)
                A__ = tokenizer_p.save_pretrained(UpperCAmelCase__)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                A__ = tokenizer_r.from_pretrained(UpperCAmelCase__)
                A__ = tokenizer_p.from_pretrained(UpperCAmelCase__)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__))

                shutil.rmtree(UpperCAmelCase__)

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
        '''simple docstring'''
        if not self.test_seqaseq:
            return

        A__ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Longer text that will definitely require truncation.
                A__ = [
                    ''' UN Chief Says There Is No Military Solution in Syria''',
                    ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
                    ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
                    ''' will only worsen the violence and misery for millions of people.''',
                ]
                A__ = [
                    '''Şeful ONU declară că nu există o soluţie militară în Siria''',
                    '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
                    ''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
                    ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
                ]
                try:
                    A__ = tokenizer.prepare_seqaseq_batch(
                        src_texts=UpperCAmelCase__ , tgt_texts=UpperCAmelCase__ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1] , 3)
                self.assertEqual(batch.labels.shape[1] , 10)

                # max_target_length will default to max_length if not specified
                A__ = tokenizer.prepare_seqaseq_batch(
                    UpperCAmelCase__ , tgt_texts=UpperCAmelCase__ , max_length=3 , return_tensors='''pt''')
                self.assertEqual(batch.input_ids.shape[1] , 3)
                self.assertEqual(batch.labels.shape[1] , 3)

                A__ = tokenizer.prepare_seqaseq_batch(
                    src_texts=UpperCAmelCase__ , max_length=3 , max_target_length=10 , return_tensors='''pt''')
                self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3)
                self.assertNotIn('''decoder_input_ids''' , UpperCAmelCase__)

    @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''')
    def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                A__ = [AddedToken('''<special>''' , lstrip=UpperCAmelCase__)]

                A__ = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__)
                A__ = tokenizer_r.encode('''Hey this is a <special> token''')

                A__ = tokenizer_r.encode('''<special>''' , add_special_tokens=UpperCAmelCase__)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    A__ = self.rust_tokenizer_class.from_pretrained(
                        UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
                    A__ = self.tokenizer_class.from_pretrained(
                        UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__)

                    A__ = tokenizer_p.encode('''Hey this is a <special> token''')

                    A__ = tokenizer_cr.encode('''Hey this is a <special> token''')

                    self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__)
                    self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)


@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    UpperCAmelCase__ = '''facebook/nllb-200-distilled-600M'''
    UpperCAmelCase__ = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is'''
        ''' that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen'''
        ''' the violence and misery for millions of people.''',
    ]
    UpperCAmelCase__ = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    UpperCAmelCase__ = [
        25_6047,
        1_6297,
        13_4408,
        8165,
        24_8066,
        1_4734,
        950,
        1135,
        10_5721,
        3573,
        83,
        2_7352,
        108,
        4_9486,
        2,
    ]

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Tuple) ->int:
        '''simple docstring'''
        A__ = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''')
        A__ = 1
        return cls

    def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        '''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 256_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 256_002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 256_057)

    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
        '''simple docstring'''
        A__ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
        '''simple docstring'''
        self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids)
        # fmt: off
        A__ = [RO_CODE, 4_254, 98_068, 112_923, 39_072, 3_909, 713, 102_767, 26, 17_314, 35_642, 14_683, 33_118, 2_022, 66_987, 2, 256_047]
        # fmt: on
        A__ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__)
        A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__)
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__)
        self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
        '''simple docstring'''
        A__ = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , UpperCAmelCase__)
        A__ = 10
        A__ = self.tokenizer(UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__).input_ids[0]
        self.assertEqual(ids[-1] , 2)
        self.assertEqual(ids[0] , UpperCAmelCase__)
        self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
        '''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']) , [256_203, 3])

    def SCREAMING_SNAKE_CASE ( self : str) ->str:
        '''simple docstring'''
        A__ = tempfile.mkdtemp()
        A__ = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(UpperCAmelCase__)
        A__ = NllbTokenizer.from_pretrained(UpperCAmelCase__)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase__)

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
        '''simple docstring'''
        A__ = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=len(self.expected_src_tokens) , return_tensors='''pt''' , )
        A__ = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''])

        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)

        self.assertEqual((2, 15) , batch.input_ids.shape)
        self.assertEqual((2, 15) , batch.attention_mask.shape)
        A__ = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__)
        self.assertEqual(UpperCAmelCase__ , batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
        '''simple docstring'''
        A__ = self.tokenizer(self.src_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=3 , return_tensors='''pt''')
        A__ = self.tokenizer(
            text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=10 , return_tensors='''pt''')
        A__ = targets['''input_ids''']
        A__ = shift_tokens_right(
            UpperCAmelCase__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )

        self.assertEqual(batch.input_ids.shape[1] , 3)
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10)

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
        '''simple docstring'''
        A__ = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''')

        self.assertEqual(
            nested_simplify(UpperCAmelCase__) , {
                # A, test, EOS, en_XX
                '''input_ids''': [[256_047, 70, 7_356, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 256_057,
            } , )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
        '''simple docstring'''
        A__ = True
        A__ = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''')
        self.assertEqual(
            inputs.input_ids , [16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2, 256_047])

        A__ = False
        A__ = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''')
        self.assertEqual(
            inputs.input_ids , [256_047, 16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2])
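# Hedged usage sketch of the language-code behaviour exercised above (real
# checkpoint id; actually running it needs network access and sentencepiece).
# With the default, non-legacy behaviour the source language code is
# prepended and </s> (id 2) appended, matching the expected id lists above.
from transformers import NllbTokenizer as _NllbTokenizer

_tok = _NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
_ids = _tok("UN Chief says there is no military solution in Syria").input_ids
print(_ids[0], _ids[-1])  # eng_Latn code (256047) first, eos (2) last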
14
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
    """simple docstring"""
    A__ = args.log_outputs
    A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )

    # load metric
    A__ = load_metric('''wer''' )
    A__ = load_metric('''cer''' )

    # compute metrics
    A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )

    # print & log results
    A__ = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(lowercase_ )

    with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
        f.write(lowercase_ )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        A__ = f"""log_{dataset_id}_predictions.txt"""
        A__ = f"""log_{dataset_id}_targets.txt"""

        with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(lowercase_ , lowercase_ ):
                p.write(f"""{i}""" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"""{i}""" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )

            result.map(lowercase_ , with_indices=lowercase_ )


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
    """simple docstring"""
    A__ = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    A__ = re.sub(lowercase_ , '''''' , text.lower() )

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    A__ = ['''\n\n''', '''\n''', '''   ''', '''  ''']

    for t in token_sequences_to_ignore:
        A__ = ''' '''.join(text.split(lowercase_ ) )

    return text


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
    """simple docstring"""
    A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ )

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    A__ = AutoFeatureExtractor.from_pretrained(args.model_id )
    A__ = feature_extractor.sampling_rate

    # resample audio
    A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) )

    # load eval pipeline
    if args.device is None:
        A__ = 0 if torch.cuda.is_available() else -1
    A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(lowercase_ ):
        A__ = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )

        A__ = prediction['''text''']
        A__ = normalize_text(batch['''sentence'''] )
        return batch

    # run inference on all examples
    A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(lowercase_ , lowercase_ )


if __name__ == "__main__":
    _lowerCamelCase : List[Any] = argparse.ArgumentParser()

    parser.add_argument(
        """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
    )
    parser.add_argument(
        """--dataset""",
        type=str,
        required=True,
        help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
    )
    parser.add_argument(
        """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
    )
    parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
    parser.add_argument(
        """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
    )
    parser.add_argument(
        """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
    )
    parser.add_argument(
        """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
    )
    parser.add_argument(
        """--device""",
        type=int,
        default=None,
        help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
    )
    _lowerCamelCase : str = parser.parse_args()

    main(args)
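# Quick check of the normalization step above: the character class is the one
# the script says was ignored during training, applied to lower-cased text
# (hedged: a standalone restatement, not an import from the script).
import re as _re

_chars_to_ignore = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605
_sample = "Hello, World! this is a test..."
print(_re.sub(_chars_to_ignore, "", _sample.lower()))  # -> hello world this is a test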
14
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


_lowerCamelCase : str = logging.get_logger(__name__)


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    def __init__( self : int , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[Any]) ->None:
        '''simple docstring'''
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''' , UpperCAmelCase__ , )
        super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__)
14
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_lowerCamelCase : int = {
    """configuration_blip""": [
        """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BlipConfig""",
        """BlipTextConfig""",
        """BlipVisionConfig""",
    ],
    """processing_blip""": ["""BlipProcessor"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Tuple = ["""BlipImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BlipModel""",
        """BlipPreTrainedModel""",
        """BlipForConditionalGeneration""",
        """BlipForQuestionAnswering""",
        """BlipVisionModel""",
        """BlipTextModel""",
        """BlipForImageTextRetrieval""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Optional[Any] = [
        """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFBlipModel""",
        """TFBlipPreTrainedModel""",
        """TFBlipForConditionalGeneration""",
        """TFBlipForQuestionAnswering""",
        """TFBlipVisionModel""",
        """TFBlipTextModel""",
        """TFBlipForImageTextRetrieval""",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
1
from pathlib import Path

import numpy as np
from PIL import Image


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> np.ndarray:
    """simple docstring"""
    A__ , A__ , A__ = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> np.ndarray:
    """simple docstring"""
    return (gray > 127) & (gray <= 255)


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> np.ndarray:
    """simple docstring"""
    A__ = np.zeros_like(lowercase_ )
    A__ = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )

    # Copy image to padded image
    A__ = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            A__ = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            A__ = int(summation > 0 )
    return output


if __name__ == "__main__":
    # read original image
    _lowerCamelCase : Tuple = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    _lowerCamelCase : Any = np.array(Image.open(lena_path))

    # kernel to be applied
    _lowerCamelCase : int = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    _lowerCamelCase : List[str] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    _lowerCamelCase : Dict = Image.fromarray(output).convert("""RGB""")
    pil_img.save("""result_dilation.png""")
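# Tiny sanity check of binary dilation with the same cross-shaped structuring
# element as the script above (hedged restatement with readable names and
# symmetric zero padding; not an import from the file).
import numpy as _np


def _dilate(image, kernel):
    kh, kw = kernel.shape
    padded = _np.zeros((image.shape[0] + kh - 1, image.shape[1] + kw - 1))
    padded[kh // 2 : kh // 2 + image.shape[0], kw // 2 : kw // 2 + image.shape[1]] = image
    out = _np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            # A pixel turns on if any kernel cell overlaps an "on" pixel.
            out[y, x] = int((kernel * padded[y : y + kh, x : x + kw]).sum() > 0)
    return out


_img = _np.zeros((3, 3), dtype=int)
_img[1, 1] = 1  # a single "on" pixel ...
_cross = _np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
assert (_dilate(_img, _cross) == _cross).all()  # ... dilates into a plus shape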
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMSNModel""",
        """ViTMSNForImageClassification""",
        """ViTMSNPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
1
from random import shuffle

import tensorflow as tf
from numpy import array


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
    """simple docstring"""
    A__ = int(lowercase_ )
    assert noofclusters < len(lowercase_ )

    # Find out the dimensionality
    A__ = len(vectors[0] )

    # Will help select random centroids from among the available vectors
    A__ = list(range(len(lowercase_ ) ) )
    shuffle(lowercase_ )

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    A__ = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        A__ = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        A__ = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        A__ = tf.placeholder('''float64''' , [dim] )
        A__ = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        A__ = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        A__ = tf.placeholder('''int32''' )
        A__ = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        A__ = tf.placeholder('''float''' , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        A__ = tf.reduce_mean(lowercase_ , 0 )

        ##Node for computing Euclidean distances
        # Placeholders for input
        A__ = tf.placeholder('''float''' , [dim] )
        A__ = tf.placeholder('''float''' , [dim] )
        A__ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) )

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        A__ = tf.placeholder('''float''' , [noofclusters] )
        A__ = tf.argmin(lowercase_ , 0 )

        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        A__ = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(lowercase_ )

        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        A__ = 100
        for _ in range(lowercase_ ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(lowercase_ ) ):
                A__ = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                A__ = [
                    sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                A__ = sess.run(
                    lowercase_ , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(lowercase_ ):
                # Collect all the vectors assigned to this cluster
                A__ = [
                    vectors[i]
                    for i in range(len(lowercase_ ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                A__ = sess.run(
                    lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )

        # Return centroids and assignments
        A__ = sess.run(lowercase_ )
        A__ = sess.run(lowercase_ )
        return centroids, assignments
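# The same Expectation-Maximization loop without the TF1 graph machinery: a
# minimal NumPy sketch (not the function above). Expectation assigns each
# vector to its nearest centroid; Maximization moves each centroid to the
# mean of its assigned vectors.
import numpy as _np


def _kmeans(vectors, k, iterations=100, seed=0):
    rng = _np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)].copy()
    assignments = _np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # Expectation step: nearest centroid per vector (Euclidean distance)
        dists = _np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # Maximization step: recompute each centroid as its cluster mean
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments


_pts = _np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
_cents, _assign = _kmeans(_pts, k=2)
print(_assign)  # two well-separated clusters, e.g. [0 0 1 1] or [1 1 0 0]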
14
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) A__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A__ = 1 if upper_limit > 0: A__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _lowerCamelCase : List[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
1
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
    """simple docstring"""
    A__ = tmp_path / '''file.csv'''
    A__ = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
    with open(lowercase_ , '''w''' ) as f:
        f.write(lowercase_ )
    return str(lowercase_ )


@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
    """simple docstring"""
    A__ = tmp_path / '''malformed_file.csv'''
    A__ = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
    with open(lowercase_ , '''w''' ) as f:
        f.write(lowercase_ )
    return str(lowercase_ )


@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
    """simple docstring"""
    A__ = tmp_path / '''csv_with_image.csv'''
    A__ = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
    with open(lowercase_ , '''w''' ) as f:
        f.write(lowercase_ )
    return str(lowercase_ )


@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
    """simple docstring"""
    A__ = tmp_path / '''csv_with_label.csv'''
    A__ = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
    with open(lowercase_ , '''w''' ) as f:
        f.write(lowercase_ )
    return str(lowercase_ )


@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
    """simple docstring"""
    A__ = tmp_path / '''csv_with_int_list.csv'''
    A__ = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
    with open(lowercase_ , '''w''' ) as f:
        f.write(lowercase_ )
    return str(lowercase_ )


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
    """simple docstring"""
    A__ = Csv()
    A__ = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(lowercase_ , match='''Error tokenizing data''' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(lowercase_ ) in record.message
        for record in caplog.records )


@require_pil
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
    """simple docstring"""
    with open(lowercase_ , encoding='''utf-8''' ) as f:
        A__ = f.read().splitlines()[1]
    A__ = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
    A__ = csv._generate_tables([[csv_file_with_image]] )
    A__ = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''image''' ).type == Image()()
    A__ = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
    """simple docstring"""
    with open(lowercase_ , encoding='''utf-8''' ) as f:
        A__ = f.read().splitlines()[1:]
    A__ = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
    A__ = csv._generate_tables([[csv_file_with_label]] )
    A__ = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
    A__ = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(lowercase_ ) for label in labels]


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
    """simple docstring"""
    A__ = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda lowercase_ : [int(lowercase_ ) for i in x.split()]} )
    A__ = csv._generate_tables([[csv_file_with_int_list]] )
    A__ = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
    A__ = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
14
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
    """simple docstring"""
    A__ = args.pruning_method
    A__ = args.threshold

    A__ = args.model_name_or_path.rstrip('''/''' )
    A__ = args.target_model_path

    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
    A__ = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        elif "bias" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ = TopKBinarizer.apply(lowercase_ , lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ , A__ = -0.1, 1.1
                A__ = torch.sigmoid(lowercase_ )
                A__ = s * (r - l) + l
                A__ = s_bar.clamp(min=0.0 , max=1.0 )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )

    if target_model_path is None:
        A__ = os.path.join(
            os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" )

    if not os.path.isdir(lowercase_ ):
        shutil.copytree(lowercase_ , lowercase_ )
        print(f"""\nCreated folder {target_model_path}""" )

    torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )


if __name__ == "__main__":
    _lowerCamelCase : Optional[Any] = argparse.ArgumentParser()

    parser.add_argument(
        """--pruning_method""",
        choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
        type=str,
        required=True,
        help=(
            """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
            """ sigmoied_threshold = Soft movement pruning)"""
        ),
    )
    parser.add_argument(
        """--threshold""",
        type=float,
        required=False,
        help=(
            """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
            """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
            """Not needed for `l0`"""
        ),
    )
    parser.add_argument(
        """--model_name_or_path""",
        type=str,
        required=True,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    parser.add_argument(
        """--target_model_path""",
        default=None,
        type=str,
        required=False,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    _lowerCamelCase : int = parser.parse_args()

    main(args)
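# Hedged sketch of the "topK" masking step used above: keep the weights whose
# learned scores fall in the top `threshold` fraction and zero out the rest.
# This restates the forward logic of a top-k binarizer in plain PyTorch; it is
# not the emmental TopKBinarizer class itself.
import torch as _torch


def _topk_mask(scores, threshold):
    k = max(1, int(threshold * scores.numel()))
    # k-th largest score == (numel - k + 1)-th smallest score
    kth_value = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= kth_value).to(scores.dtype)


_scores = _torch.tensor([[0.9, 0.1], [0.5, 0.7]])
print(_topk_mask(_scores, threshold=0.5))  # keeps the two highest scores: [[1., 0.], [0., 1.]]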
14
1
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    '''simple docstring'''

    @register_to_config
    def __init__( self : Dict , UpperCAmelCase__ : int = 768 , ) ->Tuple:
        '''simple docstring'''
        super().__init__()
        A__ = nn.Parameter(torch.zeros(1 , UpperCAmelCase__))
        A__ = nn.Parameter(torch.ones(1 , UpperCAmelCase__))

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Union[str, torch.device]] = None , UpperCAmelCase__ : Optional[torch.dtype] = None , ) ->List[str]:
        '''simple docstring'''
        A__ = nn.Parameter(self.mean.to(UpperCAmelCase__).to(UpperCAmelCase__))
        A__ = nn.Parameter(self.std.to(UpperCAmelCase__).to(UpperCAmelCase__))
        return self

    def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Tuple) ->Dict:
        '''simple docstring'''
        A__ = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Union[str, Any]) ->str:
        '''simple docstring'''
        A__ = (embeds * self.std) + self.mean
        return embeds
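# Round-trip sanity check for the scale/unscale pair above (standalone sketch
# with plain tensors; the module itself stores `mean`/`std` as nn.Parameters).
import torch as _torch

_mean, _std = _torch.zeros(1, 4), _torch.ones(1, 4) * 2.0
_embeds = _torch.randn(3, 4)
_scaled = (_embeds - _mean) / _std       # scale: normalize to zero mean, unit std
_restored = _scaled * _std + _mean       # unscale: invert the normalization
assert _torch.allclose(_restored, _embeds, atol=1e-6)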
14
_lowerCamelCase : Optional[int] = 65521


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    """simple docstring"""
    A__ = 1
    A__ = 0
    for plain_chr in plain_text:
        A__ = (a + ord(lowercase_ )) % MOD_ADLER
        A__ = (b + a) % MOD_ADLER
    return (b << 16) | a
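# Cross-check of the checksum above against the reference implementation in
# the standard library (hedged: restated with readable names, and assumes
# ASCII input so that ord(ch) equals the byte value zlib sees).
import zlib as _zlib


def _adler32(plain_text):
    MOD_ADLER = 65521
    a, b = 1, 0
    for ch in plain_text:
        a = (a + ord(ch)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a


assert _adler32("Wikipedia") == _zlib.adler32(b"Wikipedia") == 300286872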
14
1
from __future__ import annotations


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> tuple[int, float, str]:
    """simple docstring"""
    A__ = cipher_alphabet or [chr(lowercase_ ) for i in range(97 , 123 )]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        A__ = {
            '''a''': 0.0_84_97,
            '''b''': 0.0_14_92,
            '''c''': 0.0_22_02,
            '''d''': 0.0_42_53,
            '''e''': 0.1_11_62,
            '''f''': 0.0_22_28,
            '''g''': 0.0_20_15,
            '''h''': 0.0_60_94,
            '''i''': 0.0_75_46,
            '''j''': 0.0_01_53,
            '''k''': 0.0_12_92,
            '''l''': 0.0_40_25,
            '''m''': 0.0_24_06,
            '''n''': 0.0_67_49,
            '''o''': 0.0_75_07,
            '''p''': 0.0_19_29,
            '''q''': 0.0_00_95,
            '''r''': 0.0_75_87,
            '''s''': 0.0_63_27,
            '''t''': 0.0_93_56,
            '''u''': 0.0_27_58,
            '''v''': 0.0_09_78,
            '''w''': 0.0_25_60,
            '''x''': 0.0_01_50,
            '''y''': 0.0_19_94,
            '''z''': 0.0_00_77,
        }
    else:
        # Custom frequencies dictionary
        A__ = frequencies_dict

    if not case_sensitive:
        A__ = ciphertext.lower()

    # Chi squared statistic values
    A__ = {}

    # cycle through all of the shifts
    for shift in range(len(lowercase_ ) ):
        A__ = ''''''

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                A__ = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    lowercase_ )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        A__ = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                A__ = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    A__ = decrypted_with_shift.lower().count(lowercase_ )

                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    A__ = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    A__ = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    A__ = decrypted_with_shift.count(lowercase_ )

                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    A__ = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    A__ = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        A__ = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(lowercase_ ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    A__ = min(
        lowercase_ , key=lowercase_ , )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        (
            A__
        ) , (
            A__
        ) ,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
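# The decision rule above in one line of math: for each candidate shift,
#     chi^2 = sum over letters of (observed - expected)^2 / expected
# and the shift whose decryption gives the smallest chi^2 wins. A hedged
# micro-example with two letters over a hypothetical 200-character decryption,
# using the same English frequencies as the table above:
_observed = {"e": 30, "t": 21}
_expected = {"e": 0.11162 * 200, "t": 0.09356 * 200}
_chi_sq = sum((_observed[c] - _expected[c]) ** 2 / _expected[c] for c in _observed)
print(round(_chi_sq, 3))  # a small value means the decryption looks like English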
14
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

_lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

_lowerCamelCase : Union[str, Any] = {
    """vocab_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
_lowerCamelCase : str = {
    """vocab_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
_lowerCamelCase : str = {
    """vocab_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

_lowerCamelCase : Any = {
    """facebook/dpr-ctx_encoder-single-nq-base""": 512,
    """facebook/dpr-ctx_encoder-multiset-base""": 512,
}
_lowerCamelCase : List[str] = {
    """facebook/dpr-question_encoder-single-nq-base""": 512,
    """facebook/dpr-question_encoder-multiset-base""": 512,
}
_lowerCamelCase : Tuple = {
    """facebook/dpr-reader-single-nq-base""": 512,
    """facebook/dpr-reader-multiset-base""": 512,
}

_lowerCamelCase : Optional[Any] = {
    """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[int] = {
    """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[Any] = {
    """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = DPRContextEncoderTokenizer


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = DPRQuestionEncoderTokenizer


_lowerCamelCase : int = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)

_lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])


_lowerCamelCase : Dict = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

        [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ :
    '''simple docstring'''

    def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
        elif titles is None or texts is None:
            A__ = titles if texts is None else texts
            return super().__call__(
                UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
        A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles]
        A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts]
        A__ = len(UpperCAmelCase__)
        A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages
        assert len(UpperCAmelCase__) == len(
            UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts."""
        A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
        A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
        A__ = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__)
            ]
        }
        if return_attention_mask is not False:
            A__ = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            A__ = attention_mask
        return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]:
        '''simple docstring'''
        A__ = reader_input['''input_ids''']
        A__ , A__ , A__ = reader_output[:3]
        A__ = len(UpperCAmelCase__)
        A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__)
        A__ = []
        for doc_id in sorted_docs:
            A__ = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            A__ = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                A__ = sequence_ids.index(self.pad_token_id)
            else:
                A__ = len(UpperCAmelCase__)
            A__ = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(UpperCAmelCase__) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]:
        '''simple docstring'''
        A__ = []
        for start_index, start_score in enumerate(UpperCAmelCase__):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__)
        A__ = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            A__ = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(UpperCAmelCase__) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
    UpperCAmelCase__ = DPRReaderTokenizer
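# Hedged end-to-end sketch of the reader tokenizer plus the span decoding
# defined above, mirroring the example in the transformers documentation
# (real checkpoint ids; actually running it needs torch and network access).
import torch as _torch
from transformers import DPRReader, DPRReaderTokenizerFast

_reader_tok = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
_reader = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

_encoded = _reader_tok(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
with _torch.no_grad():
    _outputs = _reader(**_encoded)
_spans = _reader_tok.decode_best_spans(_encoded, _outputs, num_spans=1)
print(_spans[0].text, _spans[0].relevance_score)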
14
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_lowerCamelCase : str = {
    """configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
    """processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Any = ["""VisionTextDualEncoderModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Dict = ["""FlaxVisionTextDualEncoderModel"""]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Union[str, Any] = ["""TFVisionTextDualEncoderModel"""]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    _lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
14
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCamelCase : Any = logging.get_logger(__name__)


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = '''encoder-decoder'''
    UpperCAmelCase__ = True

    def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
        '''simple docstring'''
        super().__init__(**UpperCAmelCase__)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        A__ = kwargs.pop('''encoder''')
        A__ = encoder_config.pop('''model_type''')
        A__ = kwargs.pop('''decoder''')
        A__ = decoder_config.pop('''model_type''')

        from ..auto.configuration_auto import AutoConfig

        A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
        A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
        A__ = True

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        A__ = True
        A__ = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
        '''simple docstring'''
        A__ = copy.deepcopy(self.__dict__)
        A__ = self.encoder.to_dict()
        A__ = self.decoder.to_dict()
        A__ = self.__class__.model_type
        return output
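The classmethod above is the usual entry point for building this composite config. A short sketch using the upstream (non-obfuscated) names, which are assumptions here since this entry masks them:

from transformers import AutoConfig, EncoderDecoderConfig

encoder_config = AutoConfig.from_pretrained("bert-base-uncased")
decoder_config = AutoConfig.from_pretrained("bert-base-uncased")

# Mirrors the classmethod above: the decoder config gets is_decoder=True and
# add_cross_attention=True before the composite config is created.
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention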
14
1
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> list:
    """simple docstring"""
    A__ = len(lowercase_ )
    A__ = []
    for i in range(len(lowercase_ ) - pat_len + 1 ):
        A__ = True
        for j in range(lowercase_ ):
            if s[i + j] != pattern[j]:
                A__ = False
                break
        if match_found:
            position.append(lowercase_ )
    return position


if __name__ == "__main__":
    assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
    print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
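Because the obfuscation above masks the variable names, here is a de-obfuscated sketch of the same naive O(n*m) scan; the identifier names are my own.

def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index where `pattern` starts in `s` (naive left-to-right scan)."""
    pat_len = len(pattern)
    positions = []
    for i in range(len(s) - pat_len + 1):
        if s[i : i + pat_len] == pattern:
            positions.append(i)
    return positions

assert naive_pattern_search("ABCDEFG", "DE") == [3]
assert naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]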
14
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
    """simple docstring"""
    A__ = [0] * len(lowercase_ )
    A__ = []
    A__ = [1] * len(lowercase_ )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(lowercase_ ) ):
        if indegree[i] == 0:
            queue.append(lowercase_ )

    while queue:
        A__ = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                A__ = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(lowercase_ )

    print(max(lowercase_ ) )


# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
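De-obfuscated, this is Kahn's topological order with a longest-distance relaxation; a runnable sketch (names are my own) on the same graph follows. The longest chain 0 -> 2 -> 5 -> 6 -> 7 has 5 vertices, so the result is 5.

from collections import deque

def longest_distance(graph: dict) -> int:
    """Longest path (counted in vertices) in a DAG, via Kahn's topological order."""
    indegree = {v: 0 for v in graph}
    for successors in graph.values():
        for v in successors:
            indegree[v] += 1
    long_dist = {v: 1 for v in graph}
    queue = deque(v for v, d in indegree.items() if d == 0)
    while queue:
        vertex = queue.popleft()
        for x in graph[vertex]:
            indegree[x] -= 1
            long_dist[x] = max(long_dist[x], long_dist[vertex] + 1)
            if indegree[x] == 0:
                queue.append(x)
    return max(long_dist.values())

graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
assert longest_distance(graph) == 5  # 0 -> 2 -> 5 -> 6 -> 7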
14
1
from __future__ import annotations

import pandas as pd


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> list[int]:
    """simple docstring"""
    A__ = [0] * no_of_processes
    A__ = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(lowercase_ ):
        A__ = burst_time[i]

    A__ = 0
    A__ = 0
    A__ = 999_999_999
    A__ = 0
    A__ = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(lowercase_ ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    A__ = remaining_time[j]
                    A__ = j
                    A__ = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        A__ = remaining_time[short]
        if minm == 0:
            A__ = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            A__ = False

            # Find finish time of current process
            A__ = increment_time + 1

            # Calculate waiting time
            A__ = finish_time - arrival_time[short]
            A__ = finar - burst_time[short]

            if waiting_time[short] < 0:
                A__ = 0

        # Increment time
        increment_time += 1
    return waiting_time


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> list[int]:
    """simple docstring"""
    A__ = [0] * no_of_processes
    for i in range(lowercase_ ):
        A__ = burst_time[i] + waiting_time[i]
    return turn_around_time


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> None:
    """simple docstring"""
    A__ = 0
    A__ = 0
    for i in range(lowercase_ ):
        A__ = total_waiting_time + waiting_time[i]
        A__ = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print('''Average turn around time =''' , total_turn_around_time / no_of_processes )


if __name__ == "__main__":
    print("""Enter how many process you want to analyze""")
    _lowerCamelCase : str = int(input())
    _lowerCamelCase : Optional[int] = [0] * no_of_processes
    _lowerCamelCase : str = [0] * no_of_processes
    _lowerCamelCase : Tuple = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
        _lowerCamelCase , _lowerCamelCase : Union[str, Any] = map(int, input().split())

    _lowerCamelCase : List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    _lowerCamelCase : Any = burst_time
    _lowerCamelCase : Optional[Any] = no_of_processes
    _lowerCamelCase : Any = waiting_time
    _lowerCamelCase : Optional[int] = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    _lowerCamelCase : Optional[Any] = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            """Process""",
            """BurstTime""",
            """ArrivalTime""",
            """WaitingTime""",
            """TurnAroundTime""",
        ],
    )

    # Printing the dataFrame
    pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
    print(fcfs)
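A de-obfuscated sketch of the same shortest-remaining-time-first loop, with a small worked check (process names and values are my own): with arrivals [0, 1, 2] and bursts [8, 4, 2], the CPU is preempted at t=1 and t=2, giving waiting times [6, 2, 0].

def srtf_waiting_times(arrival: list, burst: list) -> list:
    """Waiting time per process under Shortest Remaining Time First scheduling."""
    n = len(burst)
    remaining = list(burst)
    waiting = [0] * n
    complete, t = 0, 0
    while complete < n:
        # pick the arrived, unfinished process with the least remaining time
        ready = [j for j in range(n) if arrival[j] <= t and remaining[j] > 0]
        if not ready:
            t += 1
            continue
        short = min(ready, key=lambda j: remaining[j])
        remaining[short] -= 1
        t += 1
        if remaining[short] == 0:
            complete += 1
            waiting[short] = t - arrival[short] - burst[short]
    return waiting

assert srtf_waiting_times([0, 1, 2], [8, 4, 2]) == [6, 2, 0]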
14
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


_lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)


@dataclass
class UpperCamelCase_ ( datasets.BuilderConfig ):
    '''simple docstring'''

    UpperCAmelCase__ = None
    UpperCAmelCase__ = "utf-8"
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    UpperCAmelCase__ = True  # deprecated
    UpperCAmelCase__ = None  # deprecated
    UpperCAmelCase__ = 10 << 20  # 10MB
    UpperCAmelCase__ = None


class UpperCamelCase_ ( datasets.ArrowBasedBuilder ):
    '''simple docstring'''

    UpperCAmelCase__ = JsonConfig

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
        '''simple docstring'''
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
            A__ = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
        return datasets.DatasetInfo(features=self.config.features)

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict:
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        A__ = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(UpperCAmelCase__ , (str, list, tuple)):
            A__ = data_files
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
                A__ = [files]
            A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
        A__ = []
        for split_name, files in data_files.items():
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
                A__ = [files]
            A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
            splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files}))
        return splits

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type
                A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema)
        return pa_table

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str:
        '''simple docstring'''
        for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                    A__ = json.load(UpperCAmelCase__)
                # We keep only the field we are interested in
                A__ = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(UpperCAmelCase__ , (list, tuple)):
                    A__ = set().union(*[row.keys() for row in dataset])
                    A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
                else:
                    A__ = dataset
                A__ = pa.Table.from_pydict(UpperCAmelCase__)
                yield file_idx, self._cast_table(UpperCAmelCase__)
            # If the file has one json object per line
            else:
                with open(UpperCAmelCase__ , '''rb''') as f:
                    A__ = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ = max(self.config.chunksize // 32 , 16 << 10)
                    A__ = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        A__ = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(UpperCAmelCase__)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''')
                        try:
                            while True:
                                try:
                                    A__ = paj.read_json(
                                        io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(UpperCAmelCase__ , pa.ArrowInvalid)
                                        and "straddling" not in str(UpperCAmelCase__)
                                        or block_size > len(UpperCAmelCase__)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                                    A__ = json.load(UpperCAmelCase__)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):  # list is the only sequence type supported in JSON
                                try:
                                    A__ = set().union(*[row.keys() for row in dataset])
                                    A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
                                    A__ = pa.Table.from_pydict(UpperCAmelCase__)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(UpperCAmelCase__)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contains the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__)
                        batch_idx += 1
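From the user side, the two input layouts this builder handles correspond to the following load_dataset calls; the file names are placeholders, not part of this entry.

from datasets import load_dataset

# One JSON object per line (JSON Lines) -- handled by the chunked pyarrow path:
ds = load_dataset("json", data_files="train.jsonl")

# A single JSON document with the records nested under one key, e.g.
# {"version": "1.0", "data": [{...}, {...}]} -- handled by the `field` path above:
ds = load_dataset("json", data_files="dataset.json", field="data")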
14
1
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


_lowerCamelCase : Any = logging.get_logger(__name__)  # pylint: disable=invalid-name


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
    """simple docstring"""
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(lowercase_ ):
            return ext

    raise Exception(
        f"""Unable to determine file format from file extension {path}. """
        f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"""
    )


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
    """simple docstring"""
    A__ = pipeline(
        task=args.task ,
        model=args.model if args.model else None ,
        config=args.config ,
        tokenizer=args.tokenizer ,
        device=args.device ,
    )
    A__ = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
    A__ = PipelineDataFormat.from_str(
        format=lowercase_ ,
        output_path=args.output ,
        input_path=args.input ,
        column=args.column if args.column else nlp.default_input_names ,
        overwrite=args.overwrite ,
    )
    return RunCommand(lowercase_ , lowercase_ )


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    def __init__( self : List[Any] , UpperCAmelCase__ : Pipeline , UpperCAmelCase__ : PipelineDataFormat) ->Union[str, Any]:
        '''simple docstring'''
        A__ = nlp
        A__ = reader

    @staticmethod
    def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : ArgumentParser) ->Any:
        '''simple docstring'''
        A__ = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''')
        run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''')
        run_parser.add_argument('''--input''' , type=UpperCAmelCase__ , help='''Path to the file to use for inference''')
        run_parser.add_argument('''--output''' , type=UpperCAmelCase__ , help='''Path to the file in which to write the results.''')
        run_parser.add_argument('''--model''' , type=UpperCAmelCase__ , help='''Name or path to the model to instantiate.''')
        run_parser.add_argument('''--config''' , type=UpperCAmelCase__ , help='''Name or path to the model\'s config to instantiate.''')
        run_parser.add_argument(
            '''--tokenizer''' , type=UpperCAmelCase__ , help='''Name of the tokenizer to use. (default: same as the model name)''')
        run_parser.add_argument(
            '''--column''' ,
            type=UpperCAmelCase__ ,
            help='''Name of the column to use as input. (For multi-column inputs such as QA, use column1,column2.)''' ,
        )
        run_parser.add_argument(
            '''--format''' ,
            type=UpperCAmelCase__ ,
            default='''infer''' ,
            choices=PipelineDataFormat.SUPPORTED_FORMATS ,
            help='''Input format to read from''' ,
        )
        run_parser.add_argument(
            '''--device''' ,
            type=UpperCAmelCase__ ,
            default=-1 ,
            help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' ,
        )
        run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''')
        run_parser.set_defaults(func=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
        '''simple docstring'''
        A__ , A__ = self._nlp, []

        for entry in self._reader:
            A__ = nlp(**UpperCAmelCase__) if self._reader.is_multi_columns else nlp(UpperCAmelCase__)
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
                outputs.append(UpperCAmelCase__)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            A__ = self._reader.save_binary(UpperCAmelCase__)
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""")
        else:
            self._reader.save(UpperCAmelCase__)
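The command above simply wires a pipeline call to a file reader/writer; a rough programmatic equivalent is the sketch below (the task name and inputs are illustrative, not taken from this file).

from transformers import pipeline

nlp = pipeline(task="sentiment-analysis")
for text in ["I love this.", "I hate this."]:
    print(nlp(text))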
14
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""


@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
        '''simple docstring'''
        return AutoConfig.from_pretrained(UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.num_hidden_layers , 1)

    def SCREAMING_SNAKE_CASE ( self : int) ->Any:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)

    def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , 1)

    def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
        '''simple docstring'''
        with self.assertRaises(UpperCAmelCase__):
            create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
14
1
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
    """simple docstring"""
    A__ = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    A__ = set()
    return any(
        node not in visited and depth_first_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        for node in graph
    )


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> bool:
    """simple docstring"""
    visited.add(lowercase_ )
    rec_stk.add(lowercase_ )

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(lowercase_ )
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
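A de-obfuscated sketch of the same DFS cycle detection, with a small check (identifier names are my own): a back edge to a vertex still on the recursion stack means the directed graph has a cycle.

def check_cycle(graph: dict) -> bool:
    """True if the directed graph (adjacency dict) contains a cycle."""
    visited = set()
    rec_stk = set()  # vertices on the current DFS path
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )

def depth_first_search(graph: dict, vertex, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:  # back edge -> cycle
            return True
    rec_stk.remove(vertex)
    return False

assert check_cycle({0: [1], 1: [2], 2: [0]}) is True   # 0 -> 1 -> 2 -> 0
assert check_cycle({0: [1], 1: [2], 2: []}) is False   # a DAG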
14
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class UpperCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str:
        '''simple docstring'''
        A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
        A__ = parent
        A__ = batch_size
        A__ = num_channels
        A__ = min_resolution
        A__ = max_resolution
        A__ = do_resize
        A__ = size
        A__ = do_normalize
        A__ = image_mean
        A__ = image_std
        A__ = do_rescale
        A__ = rescale_factor
        A__ = do_pad

    def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]:
        '''simple docstring'''
        if not batched:
            A__ = image_inputs[0]
            if isinstance(UpperCAmelCase__ , Image.Image):
                A__ , A__ = image.size
            else:
                A__ , A__ = image.shape[1], image.shape[2]
            if w < h:
                A__ = int(self.size['''shortest_edge'''] * h / w)
                A__ = self.size['''shortest_edge''']
            elif w > h:
                A__ = self.size['''shortest_edge''']
                A__ = int(self.size['''shortest_edge'''] * w / h)
            else:
                A__ = self.size['''shortest_edge''']
                A__ = self.size['''shortest_edge''']
        else:
            A__ = []
            for image in image_inputs:
                A__ , A__ = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0]
            A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
    '''simple docstring'''

    UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
        '''simple docstring'''
        A__ = DeformableDetrImageProcessingTester(self)

    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        '''simple docstring'''
        A__ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size'''))

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
        '''simple docstring'''
        A__ = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
        self.assertEqual(image_processor.do_pad , UpperCAmelCase__)

        A__ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
        self.assertEqual(image_processor.do_pad , UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
        '''simple docstring'''
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image)

        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values

        A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)

        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)

        A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
        '''simple docstring'''
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray)

        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values

        A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)

        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values

        A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)

        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
        '''simple docstring'''
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)

        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values

        A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)

        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values

        A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)

        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
        '''simple docstring'''
        A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
            A__ = json.loads(f.read())

        A__ = {'''image_id''': 39_769, '''annotations''': target}

        # encode them
        A__ = DeformableDetrImageProcessor()
        A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''')

        # verify pixel values
        A__ = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)

        A__ = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))

        # verify area
        A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
        # verify boxes
        A__ = torch.Size([6, 4])
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
        A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
        # verify image_id
        A__ = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
        # verify is_crowd
        A__ = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
        # verify class_labels
        A__ = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
        # verify orig_size
        A__ = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
        # verify size
        A__ = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))

    @slow
    def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
        '''simple docstring'''
        A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
            A__ = json.loads(f.read())

        A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}

        A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')

        # encode them
        A__ = DeformableDetrImageProcessor(format='''coco_panoptic''')
        A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''')

        # verify pixel values
        A__ = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)

        A__ = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))

        # verify area
        A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
        # verify boxes
        A__ = torch.Size([6, 4])
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
        A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
        # verify image_id
        A__ = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
        # verify is_crowd
        A__ = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
        # verify class_labels
        A__ = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
        # verify masks
        A__ = 822_873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__)
        # verify orig_size
        A__ = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
        # verify size
        A__ = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
14
1
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
14
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np


_lowerCamelCase : str = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
_lowerCamelCase : Tuple = typing.Union[np.floataa, int, float]  # noqa: UP007


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) )


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut:
    """simple docstring"""
    return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2)


if __name__ == "__main__":

    def SCREAMING_SNAKE_CASE ( ) -> None:
        """simple docstring"""
        from timeit import timeit

        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' ,
                number=10_000 ,
                globals=globals() ,
            ) )
        print('''With Numpy''' )
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' ,
                number=10_000 ,
                globals=globals() ,
            ) )

    benchmark()
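De-obfuscated, the NumPy variant reduces to the following; the worked check uses (1, 2, 3) and (4, 5, 6), where the squared differences sum to 3^2 + 3^2 + 3^2 = 27, so the distance is sqrt(27).

import numpy as np

def euclidean_distance(u, v) -> float:
    """sqrt(sum((u_i - v_i)^2)) using vectorized NumPy operations."""
    return float(np.sqrt(np.sum((np.asarray(u) - np.asarray(v)) ** 2)))

assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-12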
14
1
from __future__ import annotations


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple[float, list[float]]:
    """simple docstring"""
    A__ = list(range(len(lowercase_ ) ) )
    A__ = [v / w for v, w in zip(lowercase_ , lowercase_ )]
    index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ )

    A__ = 0
    A__ = [0] * len(lowercase_ )
    for i in index:
        if weight[i] <= capacity:
            A__ = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            A__ = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
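A de-obfuscated sketch of the same greedy routine with the classic worked example: values 60/100/120, weights 10/20/30, capacity 50. The value/weight ratios are 6, 5, 4, so items 1 and 2 are taken whole and 20/30 of item 3, giving 60 + 100 + 80 = 240.

def fractional_knapsack(value: list, weight: list, capacity: float):
    """Greedy by value/weight ratio; returns (max_value, fraction taken of each item)."""
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    fractions = [0.0] * len(value)
    max_value = 0.0
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * fractions[i]
            break
    return max_value, fractions

max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert max_value == 240 and fractions == [1.0, 1.0, 20 / 30]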
14
from ...processing_utils import ProcessorMixin


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = '''SpeechT5FeatureExtractor'''
    UpperCAmelCase__ = '''SpeechT5Tokenizer'''

    def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]:
        '''simple docstring'''
        super().__init__(UpperCAmelCase__ , UpperCAmelCase__)

    def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]:
        '''simple docstring'''
        A__ = kwargs.pop('''audio''' , UpperCAmelCase__)
        A__ = kwargs.pop('''text''' , UpperCAmelCase__)
        A__ = kwargs.pop('''text_target''' , UpperCAmelCase__)
        A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__)
        A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__)

        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')

        if audio is not None:
            A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
        elif text is not None:
            A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
        else:
            A__ = None

        if audio_target is not None:
            A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
            A__ = targets['''input_values''']
        elif text_target is not None:
            A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
            A__ = targets['''input_ids''']
        else:
            A__ = None

        if inputs is None:
            return targets

        if targets is not None:
            A__ = labels
            A__ = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                A__ = decoder_attention_mask

        return inputs

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]:
        '''simple docstring'''
        A__ = kwargs.pop('''input_values''' , UpperCAmelCase__)
        A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__)
        A__ = kwargs.pop('''labels''' , UpperCAmelCase__)

        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')

        if input_values is not None:
            A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
        elif input_ids is not None:
            A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
        else:
            A__ = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]):
                A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
                A__ = targets['''input_ids''']
            else:
                A__ = self.feature_extractor.feature_size
                A__ = self.feature_extractor.num_mel_bins
                A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
                A__ = feature_size_hack
                A__ = targets['''input_values''']
        else:
            A__ = None

        if inputs is None:
            return targets

        if targets is not None:
            A__ = labels
            A__ = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                A__ = decoder_attention_mask

        return inputs

    def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict:
        '''simple docstring'''
        return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
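A minimal usage sketch of the processor's text branch, using the upstream class name (SpeechT5Processor) that this entry obfuscates; the checkpoint id is illustrative.

from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")

# Text goes through the tokenizer branch of __call__ above; target audio would
# instead be passed as `audio_target=...` and routed to the feature extractor.
inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")
print(inputs["input_ids"].shape)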
14
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    '''simple docstring'''

    UpperCAmelCase__ = StableDiffusionInpaintPipeline
    UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    UpperCAmelCase__ = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCAmelCase__ = frozenset([] )

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
        '''simple docstring'''
        torch.manual_seed(0)
        A__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , )
        A__ = PNDMScheduler(skip_prk_steps=UpperCAmelCase__)
        torch.manual_seed(0)
        A__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        A__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        A__ = CLIPTextModel(UpperCAmelCase__)
        A__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        A__ = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any]=0) ->Optional[Any]:
        '''simple docstring'''
        A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__)).to(UpperCAmelCase__)
        A__ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        A__ = Image.fromarray(np.uinta(UpperCAmelCase__)).convert('''RGB''').resize((64, 64))
        A__ = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((64, 64))
        if str(UpperCAmelCase__).startswith('''mps'''):
            A__ = torch.manual_seed(UpperCAmelCase__)
        else:
            A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(UpperCAmelCase__)
        A__ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]:
        '''simple docstring'''
        A__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        A__ = self.get_dummy_components()
        A__ = StableDiffusionInpaintPipeline(**UpperCAmelCase__)
        A__ = sd_pipe.to(UpperCAmelCase__)
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__)

        A__ = self.get_dummy_inputs(UpperCAmelCase__)
        A__ = sd_pipe(**UpperCAmelCase__).images
        A__ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        A__ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def SCREAMING_SNAKE_CASE ( self : int) ->Dict:
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    def SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
        '''simple docstring'''
        A__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''')
        A__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
        A__ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy''')

        A__ = '''stabilityai/stable-diffusion-2-inpainting'''
        A__ = StableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase__ , safety_checker=UpperCAmelCase__)
        pipe.to(UpperCAmelCase__)
        pipe.set_progress_bar_config(disable=UpperCAmelCase__)
        pipe.enable_attention_slicing()

        A__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''

        A__ = torch.manual_seed(0)
        A__ = pipe(
            prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , mask_image=UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='''np''' , )
        A__ = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
        '''simple docstring'''
        A__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''')
        A__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
        A__ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')

        A__ = '''stabilityai/stable-diffusion-2-inpainting'''
        A__ = StableDiffusionInpaintPipeline.from_pretrained(
            UpperCAmelCase__ , torch_dtype=torch.floataa , safety_checker=UpperCAmelCase__ , )
        pipe.to(UpperCAmelCase__)
        pipe.set_progress_bar_config(disable=UpperCAmelCase__)
        pipe.enable_attention_slicing()

        A__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''

        A__ = torch.manual_seed(0)
        A__ = pipe(
            prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , mask_image=UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='''np''' , )
        A__ = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]:
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        A__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''')
        A__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')

        A__ = '''stabilityai/stable-diffusion-2-inpainting'''
        A__ = PNDMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''')
        A__ = StableDiffusionInpaintPipeline.from_pretrained(
            UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , torch_dtype=torch.floataa , )
        pipe.to(UpperCAmelCase__)
        pipe.set_progress_bar_config(disable=UpperCAmelCase__)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        A__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''

        A__ = torch.manual_seed(0)
        A__ = pipe(
            prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , mask_image=UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=2 , output_type='''np''' , )

        A__ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
14
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCamelCase : Tuple = logging.get_logger(__name__)

_lowerCamelCase : str = {
    """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = '''git_vision_model'''

    def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]:
        '''simple docstring'''
        super().__init__(**UpperCAmelCase__)

        A__ = hidden_size
        A__ = intermediate_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = num_channels
        A__ = patch_size
        A__ = image_size
        A__ = initializer_range
        A__ = attention_dropout
        A__ = layer_norm_eps
        A__ = hidden_act

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(UpperCAmelCase__)

        A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''') == "git":
            A__ = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = '''git'''

    def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any:
        '''simple docstring'''
        super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__)

        if vision_config is None:
            A__ = {}
            logger.info('''vision_config is None. Initializing the GitVisionConfig with default values.''')

        A__ = GitVisionConfig(**UpperCAmelCase__)
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = intermediate_size
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = initializer_range
        A__ = layer_norm_eps
        A__ = position_embedding_type
        A__ = use_cache
        A__ = tie_word_embeddings
        A__ = num_image_with_embedding
        A__ = bos_token_id
        A__ = eos_token_id

    def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
        '''simple docstring'''
        A__ = copy.deepcopy(self.__dict__)
        A__ = self.vision_config.to_dict()
        A__ = self.__class__.model_type
        return output
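A short sketch of the default-construction path above, using the upstream names (GitConfig, GitVisionConfig) that this entry obfuscates.

from transformers import GitConfig, GitVisionConfig

# With no vision_config argument, the `vision_config is None` branch above
# builds a default GitVisionConfig automatically.
config = GitConfig()
assert isinstance(config.vision_config, GitVisionConfig)
print(config.num_hidden_layers, config.vision_config.num_hidden_layers)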
14
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_lowerCamelCase : Union[str, Any] = {
    """configuration_clap""": [
        """CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ClapAudioConfig""",
        """ClapConfig""",
        """ClapTextConfig""",
    ],
    """processing_clap""": ["""ClapProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Tuple = [
        """CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ClapModel""",
        """ClapPreTrainedModel""",
        """ClapTextModel""",
        """ClapTextModelWithProjection""",
        """ClapAudioModel""",
        """ClapAudioModelWithProjection""",
    ]
    _lowerCamelCase : str = ["""ClapFeatureExtractor"""]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
import requests
from bsa import BeautifulSoup


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
    """simple docstring"""
    A__ = BeautifulSoup(requests.get(lowercase_ , params=lowercase_ ).content , '''html.parser''' )
    A__ = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    A__ = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()


if __name__ == "__main__":
    _lowerCamelCase : Optional[Any] = {
        """title""": (
            """Precisely geometry controlled microsupercapacitors for ultrahigh areal """
            """capacitance, volumetric capacitance, and energy density"""
        ),
        """journal""": """Chem. Mater.""",
        """volume""": 30,
        """pages""": """3979-3990""",
        """year""": 2018,
        """hl""": """en""",
    }
    print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
14
1
import baseaa


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes:
    """simple docstring"""
    return baseaa.baaencode(string.encode('''utf-8''' ) )


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
    """simple docstring"""
    return baseaa.baadecode(lowercase_ ).decode('''utf-8''' )


if __name__ == "__main__":
    _lowerCamelCase : List[str] = """Hello World!"""
    _lowerCamelCase : Union[str, Any] = baseaa_encode(test)
    print(encoded)
    _lowerCamelCase : Union[str, Any] = baseaa_decode(encoded)
    print(decoded)
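The obfuscation here masks the standard library's base64 module and its Base85 helpers (digits in identifiers are replaced with letters, so `baseaa.baaencode` corresponds to `base64.b85encode`). A de-obfuscated, runnable sketch:

import base64

def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))

def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode("utf-8")

encoded = base85_encode("Hello World!")
print(encoded)
assert base85_decode(encoded) == "Hello World!"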
14
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
    """simple docstring"""
    A__ = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa )

    # load LoRA weight from .safetensors
    A__ = load_file(lowercase_ )

    A__ = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            A__ = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
            A__ = pipeline.text_encoder
        else:
            A__ = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
            A__ = pipeline.unet

        # find the target layer
        A__ = layer_infos.pop(0 )
        while len(lowercase_ ) > -1:
            try:
                A__ = curr_layer.__getattr__(lowercase_ )
                if len(lowercase_ ) > 0:
                    A__ = layer_infos.pop(0 )
                elif len(lowercase_ ) == 0:
                    break
            except Exception:
                if len(lowercase_ ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    A__ = layer_infos.pop(0 )

        A__ = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(lowercase_ )
        else:
            pair_keys.append(lowercase_ )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )

        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 )
        else:
            A__ = state_dict[pair_keys[0]].to(torch.floataa )
            A__ = state_dict[pair_keys[1]].to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ )

        # update visited list
        for item in pair_keys:
            visited.append(lowercase_ )

    return pipeline


if __name__ == "__main__":
    _lowerCamelCase : Tuple = argparse.ArgumentParser()

    parser.add_argument(
        """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument(
        """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
    )
    parser.add_argument(
        """--lora_prefix_text_encoder""",
        default="""lora_te""",
        type=str,
        help="""The prefix of text encoder weight in safetensors""",
    )
    parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
    parser.add_argument(
        """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
    )
    parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")

    _lowerCamelCase : Tuple = parser.parse_args()

    _lowerCamelCase : List[Any] = args.base_model_path
    _lowerCamelCase : Optional[int] = args.checkpoint_path
    _lowerCamelCase : Dict = args.dump_path
    _lowerCamelCase : Optional[Any] = args.lora_prefix_unet
    _lowerCamelCase : Optional[int] = args.lora_prefix_text_encoder
    _lowerCamelCase : List[Any] = args.alpha

    _lowerCamelCase : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    _lowerCamelCase : Tuple = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
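The core of the script is the merge W = W0 + alpha * (up @ down). A minimal numeric sketch of that update (the alpha value, shapes, and tensors are illustrative placeholders, not taken from this file):

import torch

alpha = 0.75
weight = torch.zeros(4, 4)     # W0: the base layer weight
lora_up = torch.randn(4, 2)    # corresponds to a "lora_up.weight" tensor
lora_down = torch.randn(2, 4)  # corresponds to a "lora_down.weight" tensor

# Same rank-decomposed update the script applies in-place to curr_layer.weight.data.
merged = weight + alpha * torch.mm(lora_up, lora_down)
assert merged.shape == weight.shape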
14
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_lowerCamelCase : int = {
    """configuration_owlvit""": [
        """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """OwlViTConfig""",
        """OwlViTOnnxConfig""",
        """OwlViTTextConfig""",
        """OwlViTVisionConfig""",
    ],
    """processing_owlvit""": ["""OwlViTProcessor"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Any = ["""OwlViTFeatureExtractor"""]
    _lowerCamelCase : str = ["""OwlViTImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : str = [
        """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """OwlViTModel""",
        """OwlViTPreTrainedModel""",
        """OwlViTTextModel""",
        """OwlViTVisionModel""",
        """OwlViTForObjectDetection""",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    _lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
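# --- Hedged example (added for illustration) --------------------------------------
# `get_imports` statically collects the top-level module names imported by a source
# file. A quick standalone check, assuming only that it accepts a file path:
def _demo_get_imports(tmp_path):
    snippet = "import json\nfrom collections import OrderedDict\n"
    path = os.path.join(tmp_path, "demo.py")
    with open(path, "w") as f:
        f.write(snippet)
    assert sorted(get_imports(path)) == ["collections", "json"]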
14
1
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _lowerCamelCase : int = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''input_values''', '''attention_mask'''] def __init__( self : Optional[int] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 16_000 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 80 , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : str = "hann_window" , UpperCAmelCase__ : float = 1.0 , UpperCAmelCase__ : float = 80 , UpperCAmelCase__ : float = 7_600 , UpperCAmelCase__ : float = 1e-10 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : int , ) ->int: '''simple docstring''' super().__init__(feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__) A__ = do_normalize A__ = return_attention_mask A__ = num_mel_bins A__ = hop_length A__ = win_length A__ = win_function A__ = frame_signal_scale A__ = fmin A__ = fmax A__ = mel_floor A__ = reduction_factor A__ = win_length * sampling_rate // 1_000 A__ = hop_length * sampling_rate // 1_000 A__ = optimal_fft_length(self.sample_size) A__ = (self.n_fft // 2) + 1 A__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCAmelCase__) A__ = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ) if frame_signal_scale != 1.0: warnings.warn( '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , UpperCAmelCase__ , ) if reduction_factor != 2.0: warnings.warn( '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , UpperCAmelCase__ , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : List[np.ndarray] , UpperCAmelCase__ : List[np.ndarray] , UpperCAmelCase__ : float = 0.0) ->List[np.ndarray]: '''simple docstring''' if attention_mask is not None: A__ = np.array(UpperCAmelCase__ , np.intaa) A__ = [] for vector, length in zip(UpperCAmelCase__ , attention_mask.sum(-1)): A__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7) if length < normed_slice.shape[0]: A__ = padding_value normed_input_values.append(UpperCAmelCase__) else: A__ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values] return normed_input_values def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : np.ndarray , ) ->np.ndarray: '''simple docstring''' A__ = spectrogram( UpperCAmelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , ) return log_mel_spec.T def __call__( self : Optional[int] , UpperCAmelCase__ : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , 
UpperCAmelCase__ : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[int] = None , **UpperCAmelCase__ : int , ) ->BatchFeature: '''simple docstring''' if audio is None and audio_target is None: raise ValueError('''You must provide either `audio` or `audio_target` values.''') if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""") else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''') if audio is not None: A__ = self._process_audio( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ , ) else: A__ = None if audio_target is not None: A__ = self._process_audio( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ , ) if inputs is None: return inputs_target else: A__ = inputs_target['''input_values'''] A__ = inputs_target.get('''attention_mask''') if decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : int , ) ->BatchFeature: '''simple docstring''' A__ = isinstance(UpperCAmelCase__ , np.ndarray) and len(speech.shape) > 1 if is_batched_numpy and len(speech.shape) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""") A__ = is_batched_numpy or ( isinstance(UpperCAmelCase__ , (list, tuple)) and (isinstance(speech[0] , (np.ndarray, tuple, list))) ) if is_batched: A__ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa) for speech in speech] elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray): A__ = np.asarray(UpperCAmelCase__ , dtype=np.floataa) elif isinstance(UpperCAmelCase__ , np.ndarray) and speech.dtype is np.dtype(np.floataa): A__ = speech.astype(np.floataa) # always return batch if not is_batched: A__ = [speech] # needed to make pad() work on spectrogram inputs A__ = self.feature_size # convert into correct format for padding if is_target: A__ = [self._extract_mel_features(UpperCAmelCase__) for waveform in speech] A__ = BatchFeature({'''input_values''': features}) A__ = self.num_mel_bins else: A__ = BatchFeature({'''input_values''': speech}) A__ = self.pad( UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , 
pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ = feature_size_hack # convert input values to correct format A__ = padded_inputs['''input_values'''] if not isinstance(input_values[0] , np.ndarray): A__ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa) for array in input_values] elif ( not isinstance(UpperCAmelCase__ , np.ndarray) and isinstance(input_values[0] , np.ndarray) and input_values[0].dtype is np.dtype(np.floataa) ): A__ = [array.astype(np.floataa) for array in input_values] elif isinstance(UpperCAmelCase__ , np.ndarray) and input_values.dtype is np.dtype(np.floataa): A__ = input_values.astype(np.floataa) # convert attention_mask to correct format A__ = padded_inputs.get('''attention_mask''') if attention_mask is not None: A__ = [np.asarray(UpperCAmelCase__ , dtype=np.intaa) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: A__ = ( attention_mask if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__) is not PaddingStrategy.DO_NOT_PAD else None ) A__ = self.zero_mean_unit_var_norm( padded_inputs['''input_values'''] , attention_mask=UpperCAmelCase__ , padding_value=self.padding_value) if return_tensors is not None: A__ = padded_inputs.convert_to_tensors(UpperCAmelCase__) return padded_inputs def SCREAMING_SNAKE_CASE ( self : int) ->Dict[str, Any]: '''simple docstring''' A__ = super().to_dict() # Don't serialize these as they are derived from the other properties. A__ = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs'''] for name in names: if name in output: del output[name] return output
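# --- Hedged sketch (added for illustration) ----------------------------------------
# The zero-mean / unit-variance normalization used above, restated in plain numpy
# for a single unpadded vector; the class method additionally restricts the mean
# and variance to the unpadded region given by the attention mask.
def _demo_zero_mean_unit_var(x: "np.ndarray") -> "np.ndarray":
    # (x - mean) / sqrt(var + eps), with the same eps as the feature extractor
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)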
14
def nor_gate(input_1: int, input_2: int) -> int:
    """A NOR gate outputs 1 if and only if both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
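# --- Hedged example (added for illustration) --------------------------------------
# NOR is functionally complete: NOT, OR, and AND can all be built from it. The
# helper names below are illustrative additions, not part of the original file.
def not_gate(a: int) -> int:
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))


def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))


assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]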
14
1
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
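# --- Hedged note (added for illustration) ------------------------------------------
# What the all_reduce above should produce: every rank contributes ones(1), so after
# a SUM reduction each rank holds exactly world_size. A stricter variant of the
# check, as a sketch; it is only valid after init_process_group has succeeded:
def _check_all_reduce(device):
    t = torch.ones(1).to(device)
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    assert t.item() == dist.get_world_size(), f"expected {dist.get_world_size()}, got {t.item()}"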
14
import os import sys import unittest _lowerCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _lowerCamelCase : Any = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") _lowerCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: '''simple docstring''' A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = {'''BertModelTest''': '''BertModelTester'''} A__ = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': 
['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
14
1
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left per level."""
    if root is None:
        return []

    output: list[Any] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
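# --- Hedged example (added for illustration) ----------------------------------------
# Expected traversals for the sample tree built by make_tree():
#         1
#        / \
#       2   3
#      / \
#     4   5
def _check_traversals() -> None:
    root = make_tree()
    assert preorder(root) == [1, 2, 4, 5, 3]
    assert inorder(root) == [4, 2, 5, 1, 3]
    assert postorder(root) == [4, 5, 2, 3, 1]
    assert level_order(root) == [1, 2, 3, 4, 5]
    assert zigzag(root) == [[1], [3, 2], [4, 5]]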
14
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.assertIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) ,
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
14
1
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch at current_pos, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = mismatch_index - match_index  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
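# --- Hedged example (added for illustration) ----------------------------------------
# Searching a longer text; the expected position was verified by hand ("TEST"
# starts at index 10 of the string below).
def _demo_boyer_moore() -> None:
    searcher = BoyerMooreSearch("THIS IS A TEST TEXT", "TEST")
    assert searcher.bad_character_heuristic() == [10]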
14
from __future__ import annotations def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple[float, list[float]]: """simple docstring""" A__ = list(range(len(lowercase_ ) ) ) A__ = [v / w for v, w in zip(lowercase_ , lowercase_ )] index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ ) A__ = 0 A__ = [0] * len(lowercase_ ) for i in index: if weight[i] <= capacity: A__ = 1 max_value += value[i] capacity -= weight[i] else: A__ = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
14
1
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _lowerCamelCase : Tuple = """CompVis/stable-diffusion-v1-1""" _lowerCamelCase : Tuple = """CompVis/stable-diffusion-v1-2""" _lowerCamelCase : List[Any] = """CompVis/stable-diffusion-v1-3""" _lowerCamelCase : Optional[Any] = """CompVis/stable-diffusion-v1-4""" class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : AutoencoderKL , UpperCAmelCase__ : CLIPTextModel , UpperCAmelCase__ : CLIPTokenizer , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase__ : StableDiffusionSafetyChecker , UpperCAmelCase__ : CLIPImageProcessor , UpperCAmelCase__ : bool = True , ) ->List[Any]: '''simple docstring''' super()._init_() A__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__) A__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__) A__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__) A__ = StableDiffusionPipeline( vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , requires_safety_checker=UpperCAmelCase__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea) @property def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict[str, Any]: '''simple docstring''' return {k: getattr(self , UpperCAmelCase__) for k in self.config.keys() if not k.startswith('''_''')} def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Optional[Union[str, int]] = "auto") ->List[Any]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' self.enable_attention_slicing(UpperCAmelCase__) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : str , ) ->Tuple: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , 
output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : str , ) ->Optional[int]: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Union[str, Any] , ) ->Optional[Any]: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , 
eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Union[str, Any] , ) ->Tuple: '''simple docstring''' A__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(UpperCAmelCase__) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""") # Get first result from Stable Diffusion Checkpoint v1.1 A__ = self.textaimg_sda_a( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 A__ = self.textaimg_sda_a( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 A__ = self.textaimg_sda_a( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 A__ = self.textaimg_sda_a( prompt=UpperCAmelCase__ , height=UpperCAmelCase__ , width=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , output_type=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=UpperCAmelCase__ , **UpperCAmelCase__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], 
resa[0]])
14
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER on the decoded results and write them to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Normalize a target transcription before scoring."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
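# --- Hedged example (added for illustration) ----------------------------------------
# normalize_text lowercases the transcription and strips the ignored punctuation
# before scoring; a quick check on a toy string:
def _demo_normalize_text() -> None:
    assert normalize_text("Hello, World!") == "hello world"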
14
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
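# --- Hedged example (added for illustration) ----------------------------------------
# Instantiating the config with its defaults and overriding one character-level knob:
def _demo_canine_config() -> None:
    config = CanineConfig(downsampling_rate=2)
    assert config.hidden_size == 768
    assert config.downsampling_rate == 2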
14
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
14
1
def binary_multiply(a: int, b: int) -> int:
    """Russian-peasant multiplication: accumulate doublings of a, one per set bit of b."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Same doubling scheme, with every partial sum reduced modulo c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
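# --- Hedged example (added for illustration) ----------------------------------------
# Cross-checking both helpers against Python's built-in operators:
def _check_binary_multiply() -> None:
    assert binary_multiply(13, 11) == 13 * 11
    assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7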
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
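# --- Hedged sketch (added for illustration) -----------------------------------------
# The _LazyModule registration above defers the heavy torch imports until first
# attribute access. A stripped-down version of the same idea, using a PEP 562
# module-level __getattr__ (shown as a comment so it does not alter this module):
#
#     import importlib
#
#     def __getattr__(name):
#         for module, exports in _import_structure.items():
#             if name in exports:
#                 return getattr(importlib.import_module("." + module, __name__), name)
#         raise AttributeError(name)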
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
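Outside the tests, the same criteria plug into generation through the public `stopping_criteria` argument of `generate`. A minimal sketch ("gpt2" is just an illustrative checkpoint):

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Stopping criteria let you", return_tensors="pt")
outputs = model.generate(
    **inputs,
    stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)]),
)
print(tokenizer.decode(outputs[0]))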
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i - 1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
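A non-interactive check of `catalan_numbers` as fixed above:

assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]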
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _lowerCamelCase : Union[str, Any] = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : Any = { """facebook/dpr-ctx_encoder-single-nq-base""": 512, """facebook/dpr-ctx_encoder-multiset-base""": 512, } _lowerCamelCase : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": 512, """facebook/dpr-question_encoder-multiset-base""": 512, } _lowerCamelCase : Tuple = { """facebook/dpr-reader-single-nq-base""": 512, """facebook/dpr-reader-multiset-base""": 512, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[int] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple 
docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRContextEncoderTokenizer class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRQuestionEncoderTokenizer _lowerCamelCase : int = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) _lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) _lowerCamelCase : Dict = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ : '''simple docstring''' def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) elif titles is None or texts is None: A__ = titles if texts is None else texts return super().__call__( UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles] A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts] A__ = len(UpperCAmelCase__) A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages assert len(UpperCAmelCase__) == len( UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts.""" A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for 
encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__) ] } if return_attention_mask is not False: A__ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) A__ = attention_mask return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = reader_input['''input_ids'''] A__ , A__ , A__ = reader_output[:3] A__ = len(UpperCAmelCase__) A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__) A__ = [] for doc_id in sorted_docs: A__ = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A__ = sequence_ids.index(self.pad_token_id) else: A__ = len(UpperCAmelCase__) A__ = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(UpperCAmelCase__) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = [] for start_index, start_score in enumerate(UpperCAmelCase__): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__) A__ = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A__ = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(UpperCAmelCase__) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__ = DPRReaderTokenizer
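A hedged usage sketch of the reader tokenizer assembled above (checkpoint name illustrative; the `questions`/`titles`/`texts` keywords are the ones documented in the docstring above):

from transformers import DPRReaderTokenizerFast

tok = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tok(
    questions="What is love?",
    titles="Haddaway",
    texts="'What Is Love' is a song recorded by the artist Haddaway.",
    return_tensors="pt",
)
# encoded["input_ids"] has shape (n_passages, sequence_length)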
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = args.pruning_method A__ = args.threshold A__ = args.model_name_or_path.rstrip('''/''' ) A__ = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) A__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: A__ = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = TopKBinarizer.apply(lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ , A__ = -0.1, 1.1 A__ = torch.sigmoid(lowercase_ ) A__ = s * (r - l) + l A__ = s_bar.clamp(min=0.0 , max=1.0 ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: A__ = os.path.join( os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" ) if not os.path.isdir(lowercase_ ): shutil.copytree(lowercase_ , lowercase_ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) _lowerCamelCase : int = parser.parse_args() main(args)
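The `topK` branch above delegates to `TopKBinarizer` from the repo's `emmental` package. As a rough, self-contained reconstruction of the masking it performs (my sketch, not the packaged implementation):

import torch


def topk_mask(mask_scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    """Keep the top `keep_ratio` fraction of weights, ranked by their learned scores."""
    k = max(1, int(keep_ratio * mask_scores.numel()))
    # kthvalue finds the k-th smallest entry; numel() - k + 1 gives the cut-off
    # below which everything is pruned.
    cutoff = mask_scores.flatten().kthvalue(mask_scores.numel() - k + 1).values
    return (mask_scores >= cutoff).to(mask_scores.dtype)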
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = args.log_outputs A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric A__ = load_metric('''wer''' ) A__ = load_metric('''cer''' ) # compute metrics A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results A__ = f"""WER: {wer_result}\nCER: {cer_result}""" print(lowercase_ ) with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f: f.write(lowercase_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f"""log_{dataset_id}_predictions.txt""" A__ = f"""log_{dataset_id}_targets.txt""" with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t: # mapping function to write output def write_to_file(lowercase_ , lowercase_ ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(lowercase_ , with_indices=lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(lowercase_ , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: A__ = ''' '''.join(text.split(lowercase_ ) ) return text def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowercase_ ): A__ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['''text'''] A__ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowercase_ , lowercase_ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCamelCase : str = parser.parse_args() main(args)
MOD_ADLER = 65521  # the largest prime smaller than 2**16


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
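A reference value to sanity-check the function above; Adler-32 of "Wikipedia" is the standard worked example:

assert adler32("Wikipedia") == 300286872  # 0x11E60398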
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata _lowerCamelCase : List[Any] = """""" if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""): class UpperCamelCase_ ( tr.AbstractTransform ): '''simple docstring''' def __init__( self : int , UpperCAmelCase__ : str = " ") ->Union[str, Any]: '''simple docstring''' A__ = sentence_delimiter def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : str) ->int: '''simple docstring''' return list(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[str]) ->str: '''simple docstring''' A__ = [] for sent_idx, sentence in enumerate(UpperCAmelCase__): chars.extend(self.process_string(UpperCAmelCase__)) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCAmelCase__) - 1: chars.append(self.sentence_delimiter) return chars _lowerCamelCase : Optional[int] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: _lowerCamelCase : str = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) _lowerCamelCase : Any = """\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } """ _lowerCamelCase : Union[str, Any] = """\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. """ _lowerCamelCase : List[str] = """ Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = [\"this is the prediction\", \"there is an other sample\"] >>> references = [\"this is the reference\", \"there is another one\"] >>> cer = datasets.load_metric(\"cer\") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int=False) ->Any: '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( UpperCAmelCase__ , UpperCAmelCase__ , truth_transform=UpperCAmelCase__ , hypothesis_transform=UpperCAmelCase__ , )["wer"] A__ = 0 A__ = 0 for prediction, reference in zip(UpperCAmelCase__ , UpperCAmelCase__): A__ = jiwer.compute_measures( UpperCAmelCase__ , UpperCAmelCase__ , truth_transform=UpperCAmelCase__ , hypothesis_transform=UpperCAmelCase__ , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate turnaround times under Highest Response Ratio Next (HRRN) scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process: 0 means not yet run, 1 means finished.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Jump ahead to the next unfinished process if the CPU is idle.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Waiting time is turnaround time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process)
    waiting_time = calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process)

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
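HRRN picks the ready process with the highest response ratio, (waiting_time + burst_time) / burst_time, so short jobs that have waited long win out. A one-line check of the formula used above:

waited, burst = 6, 3
print((waited + burst) / burst)  # 3.0 — beats a just-arrived process, whose ratio is 1.0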
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
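A short usage sketch of the `from_encoder_decoder_configs` classmethod above (checkpoint names illustrative):

from transformers import AutoConfig, EncoderDecoderConfig

encoder_cfg = AutoConfig.from_pretrained("bert-base-uncased")
decoder_cfg = AutoConfig.from_pretrained("gpt2")

# Flags the decoder for cross-attention, then bundles both configs.
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True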
import collections import importlib.util import os import re from pathlib import Path _lowerCamelCase : str = """src/transformers""" # Matches is_xxx_available() _lowerCamelCase : Optional[int] = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} _lowerCamelCase : Tuple = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _lowerCamelCase : Union[str, Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available _lowerCamelCase : Tuple = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") _lowerCamelCase : Union[str, Any] = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _lowerCamelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", _lowerCamelCase : Optional[int] = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], _lowerCamelCase : int = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo _lowerCamelCase : str = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: _lowerCamelCase : str = re.compile(r"""^\s*try:""") # Catches a line with else: _lowerCamelCase : Any = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" if _re_test_backend.search(lowercase_ ) is None: return None A__ = [b[0] for b in _re_backend.findall(lowercase_ )] backends.sort() return "_and_".join(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" with open(lowercase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A__ = f.readlines() A__ = 0 while line_index < len(lowercase_ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowercase_ ): return None # First grab the objects without a specific backend in _import_structure A__ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: A__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowercase_ ): A__ = _re_one_line_import_struct.search(lowercase_ ).groups()[0] A__ = re.findall('''\[([^\]]+)\]''' , lowercase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue A__ = _re_import_struct_key_value.search(lowercase_ ) if single_line_import_search is not None: A__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 A__ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
A__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): A__ = lines[line_index] if _re_import_struct_add_one.search(lowercase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowercase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowercase_ ) is not None: A__ = _re_import_struct_add_many.search(lowercase_ ).groups()[0].split(''', ''' ) A__ = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif _re_between_brackets.search(lowercase_ ) is not None: A__ = _re_between_brackets.search(lowercase_ ).groups()[0].split(''', ''' ) A__ = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif _re_quote_object.search(lowercase_ ) is not None: objects.append(_re_quote_object.search(lowercase_ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 A__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend A__ = [] while ( line_index < len(lowercase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): A__ = lines[line_index] A__ = _re_import.search(lowercase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 A__ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(lowercase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
A__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): A__ = lines[line_index] A__ = _re_import.search(lowercase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 A__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" def find_duplicates(lowercase_ ): return [k for k, v in collections.Counter(lowercase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] A__ = [] for key in import_dict_objects.keys(): A__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) A__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): A__ = '''base imports''' if key == '''none''' else f"""{key} backend""" errors.append(f"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" A__ = [] for root, _, files in os.walk(lowercase_ ): if "__init__.py" in files: A__ = os.path.join(lowercase_ , '''__init__.py''' ) A__ = parse_init(lowercase_ ) if objects is not None: A__ = analyze_results(*lowercase_ ) if len(lowercase_ ) > 0: A__ = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(lowercase_ ) ) if len(lowercase_ ) > 0: raise ValueError('''\n\n'''.join(lowercase_ ) ) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = [] for path, directories, files in os.walk(lowercase_ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(lowercase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowercase_ ) / folder).glob('''*.py''' ) ) ) == 0: continue A__ = str((Path(lowercase_ ) / folder).relative_to(lowercase_ ) ) A__ = short_path.replace(os.path.sep , '''.''' ) submodules.append(lowercase_ ) for fname in files: if fname == "__init__.py": continue A__ = str((Path(lowercase_ ) / fname).relative_to(lowercase_ ) ) A__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(lowercase_ ) return submodules _lowerCamelCase : int = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" A__ = 
importlib.util.spec_from_file_location( '''transformers''' , os.path.join(lowercase_ , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) A__ = spec.loader.load_module() A__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(lowercase_ ) > 0: A__ = '''\n'''.join(f"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' f"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
# Longest distance in a directed acyclic graph via Kahn's topological sort:
# visit vertices in topological order and relax each outgoing edge.
def longest_distance(graph) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # every vertex is a path of length 1 by itself

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
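# Standalone sanity check for longest_distance above: in the chain 0 -> 1 -> 2
# the longest path touches three vertices, so the function should print 3.
if __name__ == "__main__":
    longest_distance({0: [1], 1: [2], 2: []})  # prints 3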
from __future__ import annotations import queue class UpperCamelCase_ : '''simple docstring''' def __init__( self : str , UpperCAmelCase__ : str) ->Tuple: '''simple docstring''' A__ = data A__ = None A__ = None def SCREAMING_SNAKE_CASE ( ) -> TreeNode: """simple docstring""" print('''\n********Press N to stop entering at any point of time********\n''' ) A__ = input('''Enter the value of the root node: ''' ).strip().lower() A__ = queue.Queue() A__ = TreeNode(int(lowercase_ ) ) q.put(lowercase_ ) while not q.empty(): A__ = q.get() A__ = f"""Enter the left node of {node_found.data}: """ A__ = input(lowercase_ ).strip().lower() or '''n''' if check == "n": return tree_node A__ = TreeNode(int(lowercase_ ) ) A__ = left_node q.put(lowercase_ ) A__ = f"""Enter the right node of {node_found.data}: """ A__ = input(lowercase_ ).strip().lower() or '''n''' if check == "n": return tree_node A__ = TreeNode(int(lowercase_ ) ) A__ = right_node q.put(lowercase_ ) raise def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ = queue.Queue() q.put(lowercase_ ) while not q.empty(): A__ = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ = queue.Queue() q.put(lowercase_ ) while not q.empty(): A__ = [] while not q.empty(): A__ = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ = [] A__ = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(lowercase_ ) A__ = n.left # end of while means current node doesn't have left child A__ = stack.pop() # start to traverse its right child A__ = n.right def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ = [] A__ = node while n or stack: while n: stack.append(lowercase_ ) A__ = n.left A__ = stack.pop() print(n.data , end=''',''' ) A__ = n.right def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ) or not node: return A__ , A__ = [], [] A__ = node stacka.append(lowercase_ ) while stacka: # to find the reversed order of post order, store it in stack2 A__ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(lowercase_ ) while stacka: # pop up from stack2 will be the post order 
print(stacka.pop().data , end=''',''' ) def SCREAMING_SNAKE_CASE ( lowercase_ = "" , lowercase_=50 , lowercase_="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char A__ , A__ = divmod(width - len(lowercase_ ) - 2 , 2 ) return f"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) _lowerCamelCase : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' UpperCAmelCase__ = None UpperCAmelCase__ = "utf-8" UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = True # deprecated UpperCAmelCase__ = None # deprecated UpperCAmelCase__ = 10 << 20 # 10MB UpperCAmelCase__ = None class UpperCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' UpperCAmelCase__ = JsonConfig def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''') A__ = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''') if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''') return datasets.DatasetInfo(features=self.config.features) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") A__ = dl_manager.download_and_extract(self.config.data_files) if isinstance(UpperCAmelCase__ , (str, list, tuple)): A__ = data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})] A__ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files})) return splits def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema) return pa_table def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) # We keep only the field we are interested in A__ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase__ , 
(list, tuple)): A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} else: A__ = dataset A__ = pa.Table.from_pydict(UpperCAmelCase__) yield file_idx, self._cast_table(UpperCAmelCase__) # If the file has one json object per line else: with open(UpperCAmelCase__ , '''rb''') as f: A__ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A__ = max(self.config.chunksize // 32 , 16 << 10) A__ = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A__ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase__) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''') try: while True: try: A__ = paj.read_json( io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase__ , pa.ArrowInvalid) and "straddling" not in str(UpperCAmelCase__) or block_size > len(UpperCAmelCase__) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON try: A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} A__ = pa.Table.from_pydict(UpperCAmelCase__) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(UpperCAmelCase__) break else: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__) batch_idx += 1
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n by repeated trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
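# Hedged worked example for the routine above: 13195 = 5 * 7 * 13 * 29, so its
# largest prime factor is 29 (the classic Project Euler sanity check).
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(17) == 17  # a prime is its own largest prime factor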
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
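# The helper under test copies a strided subset of teacher layers into a
# smaller student. A minimal sketch of that selection idea (hypothetical
# helper for illustration, not the actual make_student implementation):
def pick_evenly_spaced_layers(n_student: int, n_teacher: int) -> list:
    step = n_teacher / n_student
    return [round(i * step) for i in range(n_student)]


assert pick_evenly_spaced_layers(3, 12) == [0, 4, 8]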
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = (DDPMScheduler,) def SCREAMING_SNAKE_CASE ( self : Optional[int] , **UpperCAmelCase__ : Any) ->Dict: '''simple docstring''' A__ = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCAmelCase__) return config def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' self.check_over_configs(thresholding=UpperCAmelCase__) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = len(UpperCAmelCase__) A__ = self.dummy_model() A__ = self.dummy_sample_deter A__ = torch.manual_seed(0) for t in reversed(range(UpperCAmelCase__)): # 1. predict noise residual A__ = model(UpperCAmelCase__ , UpperCAmelCase__) # 2. 
predict previous mean of sample x_t-1 A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A__ = pred_prev_sample A__ = torch.sum(torch.abs(UpperCAmelCase__)) A__ = torch.mean(torch.abs(UpperCAmelCase__)) assert abs(result_sum.item() - 258.9606) < 1e-2 assert abs(result_mean.item() - 0.3372) < 1e-3 def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(prediction_type='''v_prediction''') A__ = scheduler_class(**UpperCAmelCase__) A__ = len(UpperCAmelCase__) A__ = self.dummy_model() A__ = self.dummy_sample_deter A__ = torch.manual_seed(0) for t in reversed(range(UpperCAmelCase__)): # 1. predict noise residual A__ = model(UpperCAmelCase__ , UpperCAmelCase__) # 2. predict previous mean of sample x_t-1 A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A__ = pred_prev_sample A__ = torch.sum(torch.abs(UpperCAmelCase__)) A__ = torch.mean(torch.abs(UpperCAmelCase__)) assert abs(result_sum.item() - 202.0296) < 1e-2 assert abs(result_mean.item() - 0.2631) < 1e-3 def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase__) A__ = scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase__): if i == len(UpperCAmelCase__) - 1: A__ = -1 else: A__ = timesteps[i + 1] A__ = scheduler.previous_timestep(UpperCAmelCase__) A__ = prev_t.item() self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = [100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase__ , msg='''`custom_timesteps` must be in descending order.'''): scheduler.set_timesteps(timesteps=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = [100, 87, 50, 1, 0] A__ = len(UpperCAmelCase__) with self.assertRaises(UpperCAmelCase__ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase__ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=UpperCAmelCase__)
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size['''shortest_edge'''] * h / w) A__ = self.size['''shortest_edge'''] elif w > h: A__ = self.size['''shortest_edge'''] A__ = int(self.size['''shortest_edge'''] * w / h) else: A__ = self.size['''shortest_edge'''] A__ = self.size['''shortest_edge'''] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0] A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''')) 
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = 
image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''image_id''': 39_769, '''annotations''': target} # encode them A__ = DeformableDetrImageProcessor() A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''') # encode them A__ = DeformableDetrImageProcessor(format='''coco_panoptic''') A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) 
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify masks A__ = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word as a Baconian cipher string of A's and B's."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian cipher string back into plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
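# Round-trip usage example for the cipher above; the expected string follows
# directly from the encode_dict entries ("h" -> "AABBB", "i" -> "ABAAA").
if __name__ == "__main__":
    assert encode("hi") == "AABBBABAAA"
    assert decode(encode("hi")) == "hi"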
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Distance between the endpoints of two vectors, using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Distance between the endpoints of two vectors, without numpy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Time both implementations on a small input."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
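# Both implementations above should agree; the (0, 0) -> (3, 4) pair is the
# classic 3-4-5 right triangle, so the expected distance is exactly 5.
if __name__ == "__main__":
    assert euclidean_distance((0, 0), (3, 4)) == 5.0
    assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0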
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration that nests one encoder config and one decoder config."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
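# A minimal usage sketch for the config class above, as a standalone snippet
# (assumes a standard transformers install; BertConfig and the layer counts
# are illustrative choices only):
if __name__ == "__main__":
    from transformers import BertConfig, EncoderDecoderConfig

    encoder = BertConfig(num_hidden_layers=4)
    decoder = BertConfig(num_hidden_layers=2)
    config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
    assert config.decoder.is_decoder and config.decoder.add_cross_attention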
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """Bundles a SpeechT5 feature extractor and tokenizer behind one interface."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily treat each mel bin as a feature so padding works on spectrograms.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
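# A minimal usage sketch for the processor above, as a standalone snippet
# (assumes transformers is installed and the public microsoft/speecht5_tts
# checkpoint is reachable; this exercises only the text branch of __call__):
if __name__ == "__main__":
    from transformers import SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    inputs = processor(text="Hello world", return_tensors="pt")
    print(inputs["input_ids"].shape)  # ids produced by the tokenizer branch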
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _lowerCamelCase : List[str] = { """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]: """simple docstring""" A__ = {} state_dict.pop('''pixel_mean''' , lowercase_ ) state_dict.pop('''pixel_std''' , lowercase_ ) A__ = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A__ = key.replace(lowercase_ , lowercase_ ) if re.match(lowercase_ , lowercase_ ): A__ = int(re.match(lowercase_ , lowercase_ ).group(2 ) ) if layer_nb == 0: A__ = key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: A__ = key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: A__ = key.replace('''layers.2''' , '''proj_out''' ) A__ = value A__ = model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_="ybelkada/segment-anything" ) -> Union[str, Any]: """simple docstring""" A__ = hf_hub_download(lowercase_ , f"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: A__ = SamConfig() elif "sam_vit_l" in model_name: A__ = SamVisionConfig( hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) A__ = SamConfig( vision_config=lowercase_ , ) elif "sam_vit_h" in model_name: A__ = SamVisionConfig( hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) A__ = SamConfig( vision_config=lowercase_ , ) A__ = torch.load(lowercase_ , map_location='''cpu''' ) A__ = replace_keys(lowercase_ ) A__ = SamImageProcessor() A__ = SamProcessor(image_processor=lowercase_ ) A__ = SamModel(lowercase_ ) hf_model.load_state_dict(lowercase_ ) A__ = hf_model.to('''cuda''' ) A__ = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert('''RGB''' ) A__ = [[[400, 650]]] A__ = [[1]] A__ = processor(images=np.array(lowercase_ ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A__ = hf_model(**lowercase_ ) 
A__ = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_79_89_02_51_15_96_68 A__ = processor( images=np.array(lowercase_ ) , input_points=lowercase_ , input_labels=lowercase_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A__ = hf_model(**lowercase_ ) A__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.97_12_60_30_92_19_36_04 A__ = ((75, 275, 1_725, 850),) A__ = processor(images=np.array(lowercase_ ) , input_boxes=lowercase_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A__ = hf_model(**lowercase_ ) A__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.86_86_01_56_05_92_65_14 # Test with 2 points and 1 image. A__ = [[[400, 650], [800, 650]]] A__ = [[1, 1]] A__ = processor( images=np.array(lowercase_ ) , input_points=lowercase_ , input_labels=lowercase_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A__ = hf_model(**lowercase_ ) A__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.99_36_04_77_92_43_46_92 if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() _lowerCamelCase : Union[str, Any] = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( """--model_name""", default="""sam_vit_h_4b8939""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) parser.add_argument( """--model_hub_id""", default="""ybelkada/segment-anything""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) _lowerCamelCase : Optional[int] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Tuple = logging.get_logger(__name__) _lowerCamelCase : str = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git_vision_model''' def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = hidden_size A__ = intermediate_size A__ = num_hidden_layers A__ = num_attention_heads A__ = num_channels A__ = patch_size A__ = image_size A__ = initializer_range A__ = attention_dropout A__ = layer_norm_eps A__ = hidden_act @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the vision config dict if we are loading from GITConfig if config_dict.get('''model_type''') == "git": A__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git''' def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any: '''simple docstring''' super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__) if vision_config is None: A__ = {} logger.info('''vision_config is None. 
initializing the GitVisionConfig with default values.''') A__ = GitVisionConfig(**UpperCAmelCase__) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = tie_word_embeddings A__ = num_image_with_embedding A__ = bos_token_id A__ = eos_token_id def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.vision_config.to_dict() A__ = self.__class__.model_type return output
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal):
    """Convert an integer-valued decimal to a hexadecimal string with a 0x prefix."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
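# Quick cross-check of the converter above against Python's built-in hex();
# both should agree on positive and negative integers.
if __name__ == "__main__":
    assert decimal_to_hexadecimal(5) == hex(5) == "0x5"
    assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"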
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
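# A minimal usage sketch for the fast tokenizer above, as a standalone snippet
# (assumes transformers is installed and the public 90M checkpoint is reachable):
if __name__ == "__main__":
    from transformers import BlenderbotSmallTokenizerFast

    tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
    print(tokenizer("sample text")["input_ids"])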
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" A__ = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors A__ = load_file(lowercase_ ) A__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A__ = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) A__ = pipeline.text_encoder else: A__ = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) A__ = pipeline.unet # find the target layer A__ = layer_infos.pop(0 ) while len(lowercase_ ) > -1: try: A__ = curr_layer.__getattr__(lowercase_ ) if len(lowercase_ ) > 0: A__ = layer_infos.pop(0 ) elif len(lowercase_ ) == 0: break except Exception: if len(lowercase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: A__ = layer_infos.pop(0 ) A__ = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(lowercase_ ) else: pair_keys.append(lowercase_ ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 ) else: A__ = state_dict[pair_keys[0]].to(torch.floataa ) A__ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ) # update visited list for item in pair_keys: visited.append(lowercase_ ) return pipeline if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = SCREAMING_SNAKE_CASE(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(dump_path, safe_serialization=args.to_safetensors)
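# Example invocation of the conversion script above (hypothetical file name;
# the flags come from the argparse definition, and the paths are placeholders):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path <diffusers_base_model_dir> \
#       --checkpoint_path <lora_weights>.safetensors \
#       --dump_path <output_dir> \
#       --alpha 0.75 \
#       --device cuda:0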
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    # Reverse the partitions.
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed  # Place the cars
        i += randint(1, max_speed * 2) if random_frequency else frequency  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
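# A quick demonstration sketch for the functions above: build a 25-cell
# circular highway with a car every 4 cells starting at speed 1, then advance
# 4 generations with a 10% random-braking chance. Parameter values here are
# illustrative, not from the original file.
#
# demo = construct_highway(25, 4, 1)
# for generation in simulate(demo, 4, 0.1, 5):
#     print(generation)  # -1 marks an empty cell; other values are car speeds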
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import os import sys import unittest _lowerCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _lowerCamelCase : Any = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") _lowerCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: '''simple docstring''' A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = {'''BertModelTest''': '''BertModelTester'''} A__ = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': 
['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
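# Worked example for the recurrence above: partition(5) == 7, the seven
# partitions of 5 being 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.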
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.asseretIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , 
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str) ->Optional[Any]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']): A__ = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = '''sgugger/tiny-distilbert-classification''' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , only_pretrain_model=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , torchscript=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''') def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def SCREAMING_SNAKE_CASE ( self : Any) ->int: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' A__ = AutoConfig.from_pretrained(UpperCAmelCase__) # set architectures equal to `None` A__ = None A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config]) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' A__ = 
'''sshleifer/tiny-gpt2''' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''') def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def SCREAMING_SNAKE_CASE ( self : str) ->Tuple: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' A__ = AutoConfig.from_pretrained(UpperCAmelCase__) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config]) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: '''simple docstring''' A__ = '''sshleifer/tinier_bart''' A__ = AutoConfig.from_pretrained(UpperCAmelCase__) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config]) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' A__ = AutoConfig.from_pretrained(UpperCAmelCase__) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config]) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' A__ = '''sshleifer/tinier_bart''' A__ = AutoConfig.from_pretrained(UpperCAmelCase__) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config]) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , save_to_csv=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , 
inference_time_csv_file=os.path.join(UpperCAmelCase__ , '''inf_time.csv''') , train_memory_csv_file=os.path.join(UpperCAmelCase__ , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(UpperCAmelCase__ , '''inf_mem.csv''') , train_time_csv_file=os.path.join(UpperCAmelCase__ , '''train_time.csv''') , env_info_csv_file=os.path.join(UpperCAmelCase__ , '''env.csv''') , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__) benchmark.run() self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''inf_time.csv''')).exists()) self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''train_time.csv''')).exists()) self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''inf_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''train_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''env.csv''')).exists()) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' A__ = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(UpperCAmelCase__ : Tuple): self.assertTrue(hasattr(UpperCAmelCase__ , '''sequential''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''cumulative''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''current''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''total''')) with tempfile.TemporaryDirectory() as tmp_dir: A__ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase__ , '''log.txt''') , log_print=UpperCAmelCase__ , trace_memory_line_by_line=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , ) A__ = PyTorchBenchmark(UpperCAmelCase__) A__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''log.txt''')).exists())
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCamelCase : List[Any] = logging.get_logger(__name__) _lowerCamelCase : int = { """salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""", } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''blip_2_vision_model''' def __init__( self : Optional[Any] , UpperCAmelCase__ : Any=1_408 , UpperCAmelCase__ : Optional[int]=6_144 , UpperCAmelCase__ : List[str]=39 , UpperCAmelCase__ : str=16 , UpperCAmelCase__ : int=224 , UpperCAmelCase__ : List[str]=14 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Dict=0.00001 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Dict=1e-10 , UpperCAmelCase__ : Optional[int]=True , **UpperCAmelCase__ : Union[str, Any] , ) ->Tuple: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = hidden_size A__ = intermediate_size A__ = num_hidden_layers A__ = num_attention_heads A__ = patch_size A__ = image_size A__ = initializer_range A__ = attention_dropout A__ = layer_norm_eps A__ = hidden_act A__ = qkv_bias @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[int] , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Tuple) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the vision config dict if we are loading from Blip2Config if config_dict.get('''model_type''') == "blip-2": A__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''blip_2_qformer''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[str]=30_522 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : str=12 , UpperCAmelCase__ : Any=3_072 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : Tuple=1e-12 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Union[str, Any]="absolute" , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=1_408 , **UpperCAmelCase__ : Optional[int] , ) ->Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = cross_attention_frequency A__ = encoder_hidden_size @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : str) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('''model_type''') == "blip-2": A__ = config_dict['''qformer_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''blip-2''' UpperCAmelCase__ = True def __init__( self : Union[str, Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : str=32 , **UpperCAmelCase__ : Tuple) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase__) if vision_config is None: A__ = {} logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''') if qformer_config is None: A__ = {} logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''') if text_config is None: A__ = {} logger.info('''text_config is None. 
Initializing the text config with default values (`OPTConfig`).''') A__ = BlipaVisionConfig(**UpperCAmelCase__) A__ = BlipaQFormerConfig(**UpperCAmelCase__) A__ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt''' A__ = CONFIG_MAPPING[text_model_type](**UpperCAmelCase__) A__ = self.text_config.tie_word_embeddings A__ = self.text_config.is_encoder_decoder A__ = num_query_tokens A__ = self.vision_config.hidden_size A__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES A__ = 1.0 A__ = 0.02 @classmethod def SCREAMING_SNAKE_CASE ( cls : int , UpperCAmelCase__ : BlipaVisionConfig , UpperCAmelCase__ : BlipaQFormerConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Dict , ) ->Union[str, Any]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.vision_config.to_dict() A__ = self.qformer_config.to_dict() A__ = self.text_config.to_dict() A__ = self.__class__.model_type return output
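# A hedged sketch of composing the combined config from its parts. The class
# names in this file are obfuscated, so the sketch assumes the transformers
# exports (Blip2VisionConfig, Blip2QFormerConfig, OPTConfig, Blip2Config) and
# the from_vision_qformer_text_configs classmethod defined above.
#
# from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#
# config = Blip2Config.from_vision_qformer_text_configs(
#     vision_config=Blip2VisionConfig(),
#     qformer_config=Blip2QFormerConfig(),
#     text_config=OPTConfig(),
# )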
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = args.log_outputs A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric A__ = load_metric('''wer''' ) A__ = load_metric('''cer''' ) # compute metrics A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results A__ = f"""WER: {wer_result}\nCER: {cer_result}""" print(lowercase_ ) with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f: f.write(lowercase_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f"""log_{dataset_id}_predictions.txt""" A__ = f"""log_{dataset_id}_targets.txt""" with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t: # mapping function to write output def write_to_file(lowercase_ , lowercase_ ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(lowercase_ , with_indices=lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(lowercase_ , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: A__ = ''' '''.join(text.split(lowercase_ ) ) return text def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowercase_ ): A__ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['''text'''] A__ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowercase_ , lowercase_ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCamelCase : str = parser.parse_args() main(args)
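# Example invocation of the evaluation script above (hypothetical file name;
# the flags come from the argparse definition, and the identifiers are
# placeholders):
#
#   python eval.py \
#       --model_id <speech-recognition-checkpoint> \
#       --dataset <dataset_id> \
#       --config en \
#       --split test \
#       --log_outputs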
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
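# Worked example: adler32("Wikipedia") == 300286872 (0x11E60398), matching
# zlib.adler32(b"Wikipedia"); the low 16 bits hold the running sum a, the
# high 16 bits the sum-of-sums b.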
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCamelCase : int = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _lowerCamelCase : List[str] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Union[str, Any]) ->Optional[int]: '''simple docstring''' super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__) requires_backends(self , '''vision''') self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict=None) ->Union[str, Any]: '''simple docstring''' A__ = {} if top_k is not None: A__ = top_k return {}, {}, postprocess_params def __call__( self : int , UpperCAmelCase__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCAmelCase__ : Optional[int]) ->int: '''simple docstring''' return super().__call__(UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = load_image(UpperCAmelCase__) A__ = self.image_processor(images=UpperCAmelCase__ , return_tensors=self.framework) return model_inputs def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Tuple) ->int: '''simple docstring''' A__ = self.model(**UpperCAmelCase__) return model_outputs def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=5) ->List[str]: '''simple docstring''' if top_k > self.model.config.num_labels: A__ = self.model.config.num_labels if self.framework == "pt": A__ = model_outputs.logits.softmax(-1)[0] A__ , A__ = probs.topk(UpperCAmelCase__) elif self.framework == "tf": A__ = stable_softmax(model_outputs.logits , axis=-1)[0] A__ = tf.math.top_k(UpperCAmelCase__ , k=UpperCAmelCase__) A__ , A__ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"""Unsupported framework: {self.framework}""") A__ = scores.tolist() A__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase__ , UpperCAmelCase__)]
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Optional[int] = logging.get_logger(__name__) _lowerCamelCase : Optional[Any] = { """microsoft/swinv2-tiny-patch4-window8-256""": ( """https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json""" ), } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''swinv2''' UpperCAmelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Union[str, Any] , UpperCAmelCase__ : int=224 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Any=96 , UpperCAmelCase__ : Union[str, Any]=[2, 2, 6, 2] , UpperCAmelCase__ : Union[str, Any]=[3, 6, 12, 24] , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : Tuple=4.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : Optional[Any]=32 , **UpperCAmelCase__ : int , ) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = image_size A__ = patch_size A__ = num_channels A__ = embed_dim A__ = depths A__ = len(UpperCAmelCase__) A__ = num_heads A__ = window_size A__ = mlp_ratio A__ = qkv_bias A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = drop_path_rate A__ = hidden_act A__ = use_absolute_embeddings A__ = layer_norm_eps A__ = initializer_range A__ = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A__ = int(embed_dim * 2 ** (len(UpperCAmelCase__) - 1)) A__ = (0, 0, 0, 0)
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) A__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A__ = 1 if upper_limit > 0: A__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _lowerCamelCase : List[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
1
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


_lowerCamelCase : List[Any] = logging.get_logger(__name__)


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    def __init__( self : Optional[Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : List[Any]) ->None:
        '''simple docstring'''
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , UpperCAmelCase__ , )
        super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__)
14
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
    """simple docstring"""
    A__ = args.pruning_method
    A__ = args.threshold

    A__ = args.model_name_or_path.rstrip('''/''' )
    A__ = args.target_model_path

    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
    A__ = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        elif "bias" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ = TopKBinarizer.apply(lowercase_ , lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ , A__ = -0.1, 1.1
                A__ = torch.sigmoid(lowercase_ )
                A__ = s * (r - l) + l
                A__ = s_bar.clamp(min=0.0 , max=1.0 )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )

    if target_model_path is None:
        A__ = os.path.join(
            os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" )

    if not os.path.isdir(lowercase_ ):
        shutil.copytree(lowercase_ , lowercase_ )
        print(f"""\nCreated folder {target_model_path}""" )

    torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )


if __name__ == "__main__":
    _lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        """--pruning_method""",
        choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
        type=str,
        required=True,
        help=(
            """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
            """ sigmoied_threshold = Soft movement pruning)"""
        ),
    )
    parser.add_argument(
        """--threshold""",
        type=float,
        required=False,
        help=(
            """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
            """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
            """Not needed for `l0`"""
        ),
    )
    parser.add_argument(
        """--model_name_or_path""",
        type=str,
        required=True,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    parser.add_argument(
        """--target_model_path""",
        default=None,
        type=str,
        required=False,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    _lowerCamelCase : int = parser.parse_args()

    main(args)
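For reference, a standalone sketch of the hard-sigmoid stretch used in the `l0` branch above (the tensor shape is hypothetical; the (l, r) constants come from the row itself):

import torch

scores = torch.randn(4, 4)  # hypothetical mask_scores tensor
l, r = -0.1, 1.1
s_bar = torch.sigmoid(scores) * (r - l) + l  # stretch the sigmoid over (l, r)
mask = s_bar.clamp(min=0.0, max=1.0)         # clamp back to [0, 1]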
14
1
import datasets

from .evaluate import evaluate


_lowerCamelCase : Any = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""

_lowerCamelCase : int = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the
corresponding reading passage, or the question might be unanswerable.
"""

_lowerCamelCase : Dict = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = datasets.load_metric(\"squad\")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    '''simple docstring'''

    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': {'''id''': datasets.Value('''string'''), '''prediction_text''': datasets.Value('''string''')},
                    '''references''': {
                        '''id''': datasets.Value('''string'''),
                        '''answers''': datasets.features.Sequence(
                            {
                                '''text''': datasets.Value('''string'''),
                                '''answer_start''': datasets.Value('''int32'''),
                            }),
                    },
                }) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]) ->Tuple:
        '''simple docstring'''
        A__ = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        A__ = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        A__ = evaluate(dataset=UpperCAmelCase__ , predictions=UpperCAmelCase__)
        return score
14
_lowerCamelCase : Optional[int] = 65521


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    """simple docstring"""
    A__ = 1
    A__ = 0
    for plain_chr in plain_text:
        A__ = (a + ord(lowercase_ )) % MOD_ADLER
        A__ = (b + a) % MOD_ADLER
    return (b << 16) | a
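The loop above is the textbook Adler-32; zlib ships a reference implementation that can cross-check it (the expected value below is the standard worked example for the ASCII string "Wikipedia"):

import zlib

assert zlib.adler32(b"Wikipedia") == 0x11E60398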
14
1
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    _lowerCamelCase : Optional[int] = None

_lowerCamelCase : List[Any] = logging.get_logger(__name__)

_lowerCamelCase : List[str] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

_lowerCamelCase : Union[str, Any] = {
    """vocab_file""": {
        """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
        """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
        """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
        """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
        """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
        """t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
        """t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
        """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
        """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
    },
}


# TODO(PVP) - this should be removed in Transformers v5
_lowerCamelCase : Union[str, Any] = {
    """t5-small""": 512,
    """t5-base""": 512,
    """t5-large""": 512,
    """t5-3b""": 512,
    """t5-11b""": 512,
}


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
    UpperCAmelCase__ = TaTokenizer
    UpperCAmelCase__ = []

    def __init__( self : List[Any] , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict="</s>" , UpperCAmelCase__ : str="<unk>" , UpperCAmelCase__ : Optional[int]="<pad>" , UpperCAmelCase__ : Optional[int]=100 , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : int , ) ->int:
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            A__ = [f"""<extra_id_{i}>""" for i in range(UpperCAmelCase__)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            A__ = len(set(filter(lambda UpperCAmelCase__: bool('''extra_id_''' in str(UpperCAmelCase__)) , UpperCAmelCase__)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''')

        super().__init__(
            UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , extra_ids=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )

        A__ = vocab_file
        A__ = False if not self.vocab_file else True
        A__ = extra_ids

    @staticmethod
    def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int) ->int:
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            A__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    '''This tokenizer was incorrectly instantiated with a model max length of'''
                    f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
                    ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
                    f""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCAmelCase__ , )

        return max_model_length

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')

        if not os.path.isdir(UpperCAmelCase__):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        A__ = os.path.join(
            UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__):
            copyfile(self.vocab_file , UpperCAmelCase__)
            logger.info(f"""Copy vocab file to {out_vocab_file}""")

        return (out_vocab_file,)

    def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->List[int]:
        '''simple docstring'''
        A__ = token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a
        else:
            A__ = token_ids_a + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->List[int]:
        '''simple docstring'''
        A__ = [self.eos_token_id]

        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]

    def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
        '''simple docstring'''
        return list(
            set(filter(lambda UpperCAmelCase__: bool(re.search(R'''<extra_id_\d+>''' , UpperCAmelCase__)) is not None , self.additional_special_tokens)))

    def SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
        '''simple docstring'''
        return [self.convert_tokens_to_ids(UpperCAmelCase__) for token in self.get_sentinel_tokens()]
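A small illustration of the sentinel-token convention encoded by `extra_ids` above (pure string manipulation, no tokenizer needed):

sentinels = [f"<extra_id_{i}>" for i in range(100)]  # the default extra_ids=100
assert sentinels[0] == "<extra_id_0>" and sentinels[-1] == "<extra_id_99>"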
14
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

_lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

_lowerCamelCase : Union[str, Any] = {
    """vocab_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
_lowerCamelCase : str = {
    """vocab_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
_lowerCamelCase : str = {
    """vocab_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

_lowerCamelCase : Any = {
    """facebook/dpr-ctx_encoder-single-nq-base""": 512,
    """facebook/dpr-ctx_encoder-multiset-base""": 512,
}
_lowerCamelCase : List[str] = {
    """facebook/dpr-question_encoder-single-nq-base""": 512,
    """facebook/dpr-question_encoder-multiset-base""": 512,
}
_lowerCamelCase : Tuple = {
    """facebook/dpr-reader-single-nq-base""": 512,
    """facebook/dpr-reader-multiset-base""": 512,
}

_lowerCamelCase : Optional[Any] = {
    """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[int] = {
    """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[Any] = {
    """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = DPRContextEncoderTokenizer


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = DPRQuestionEncoderTokenizer


_lowerCamelCase : int = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)

_lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])


_lowerCamelCase : Dict = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

        [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ :
    '''simple docstring'''

    def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
        elif titles is None or texts is None:
            A__ = titles if texts is None else texts
            return super().__call__(
                UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
        A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles]
        A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts]
        A__ = len(UpperCAmelCase__)
        A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages
        assert len(UpperCAmelCase__) == len(
            UpperCAmelCase__), f"""There should be as many titles as texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts."""
        A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
        A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
        A__ = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__)
            ]
        }
        if return_attention_mask is not False:
            A__ = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            A__ = attention_mask
        return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]:
        '''simple docstring'''
        A__ = reader_input['''input_ids''']
        A__ , A__ , A__ = reader_output[:3]
        A__ = len(UpperCAmelCase__)
        A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__)
        A__ = []
        for doc_id in sorted_docs:
            A__ = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            A__ = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                A__ = sequence_ids.index(self.pad_token_id)
            else:
                A__ = len(UpperCAmelCase__)
            A__ = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(UpperCAmelCase__) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]:
        '''simple docstring'''
        A__ = []
        for start_index, start_score in enumerate(UpperCAmelCase__):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__)
        A__ = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            A__ = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(UpperCAmelCase__) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
    UpperCAmelCase__ = DPRReaderTokenizer
14
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCamelCase : List[Any] = logging.get_logger(__name__)

_lowerCamelCase : Optional[int] = {
    """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
    """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = '''luke'''

    def __init__( self : Any , UpperCAmelCase__ : str=50_267 , UpperCAmelCase__ : Any=500_000 , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : str=256 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Optional[Any]=3_072 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[int]=512 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Optional[Any]=1e-12 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Optional[int]=2 , **UpperCAmelCase__ : Any , ) ->List[Any]:
        '''simple docstring'''
        super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__)

        A__ = vocab_size
        A__ = entity_vocab_size
        A__ = hidden_size
        A__ = entity_emb_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = intermediate_size
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = type_vocab_size
        A__ = initializer_range
        A__ = layer_norm_eps
        A__ = use_entity_aware_attention
        A__ = classifier_dropout
14
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCamelCase : Any = logging.get_logger(__name__)


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = '''encoder-decoder'''
    UpperCAmelCase__ = True

    def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
        '''simple docstring'''
        super().__init__(**UpperCAmelCase__)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        A__ = kwargs.pop('''encoder''')
        A__ = encoder_config.pop('''model_type''')
        A__ = kwargs.pop('''decoder''')
        A__ = decoder_config.pop('''model_type''')

        from ..auto.configuration_auto import AutoConfig

        A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
        A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
        A__ = True

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        A__ = True
        A__ = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
        '''simple docstring'''
        A__ = copy.deepcopy(self.__dict__)
        A__ = self.encoder.to_dict()
        A__ = self.decoder.to_dict()
        A__ = self.__class__.model_type
        return output
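A usage sketch under the original transformers names, which this row renames (assumed to correspond to the obfuscated class above); the combined config marks the decoder side for cross-attention:

from transformers import BertConfig, EncoderDecoderConfig

enc = BertConfig()
dec = BertConfig()
cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention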
14
1
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = ['''image_processor''', '''tokenizer''']
    UpperCAmelCase__ = '''LayoutLMv3ImageProcessor'''
    UpperCAmelCase__ = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')

    def __init__( self : Optional[int] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : str) ->Tuple:
        '''simple docstring'''
        A__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , UpperCAmelCase__ , )
            A__ = kwargs.pop('''feature_extractor''')

        A__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(UpperCAmelCase__ , UpperCAmelCase__)

    def __call__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase__ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase__ : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase__ : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : List[str] , ) ->BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''')

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''')

        # first, apply the image processor
        A__ = self.image_processor(images=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
                A__ = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            A__ = features['''words''']

        A__ = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )

        # add pixel values
        A__ = features.pop('''pixel_values''')
        if return_overflowing_tokens is True:
            A__ = self.get_overflowing_images(UpperCAmelCase__ , encoded_inputs['''overflow_to_sample_mapping'''])
        A__ = images

        return encoded_inputs

    def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str]) ->List[Any]:
        '''simple docstring'''
        A__ = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(UpperCAmelCase__) != len(UpperCAmelCase__):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f""" {len(UpperCAmelCase__)} and {len(UpperCAmelCase__)}""")

        return images_with_overflow

    def SCREAMING_SNAKE_CASE ( self : int , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Dict) ->Dict:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Dict , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]:
        '''simple docstring'''
        return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)

    @property
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase__ , )
        return self.image_processor_class

    @property
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase__ , )
        return self.image_processor
14
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
    """simple docstring"""
    A__ = [0] * len(lowercase_ )
    A__ = []
    A__ = [1] * len(lowercase_ )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(lowercase_ ) ):
        if indegree[i] == 0:
            queue.append(lowercase_ )

    while queue:
        A__ = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                A__ = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(lowercase_ )

    print(max(lowercase_ ) )


# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
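A hand-check of the script above with the same adjacency list: counting vertices along the longest chain, 0 -> 2 -> 5 -> 6 -> 7 has length 5, so the script prints 5. A memoized DFS gives the same answer:

from functools import lru_cache

graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}

@lru_cache(maxsize=None)
def longest_from(v: int) -> int:
    # number of vertices on the longest path starting at v
    return 1 + max((longest_from(u) for u in graph[v]), default=0)

assert max(longest_from(v) for v in graph) == 5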
14
1
from __future__ import annotations

from scipy.special import comb  # type: ignore


class UpperCamelCase_ :
    '''simple docstring'''

    def __init__( self : List[Any] , UpperCAmelCase__ : list[tuple[float, float]]) ->List[str]:
        '''simple docstring'''
        A__ = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        A__ = len(UpperCAmelCase__) - 1

    def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : float) ->list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        A__ = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree , UpperCAmelCase__) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(UpperCAmelCase__) , 5) == 1
        return output_values

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : float) ->tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        A__ = self.basis_function(UpperCAmelCase__)
        A__ = 0.0
        A__ = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : float = 0.01) ->int:
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore

        A__ = []  # x coordinates of points to plot
        A__ = []  # y coordinates of points to plot

        A__ = 0.0
        while t <= 1:
            A__ = self.bezier_curve_function(UpperCAmelCase__)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        A__ = [i[0] for i in self.list_of_points]
        A__ = [i[1] for i in self.list_of_points]

        plt.plot(
            UpperCAmelCase__ , UpperCAmelCase__ , color='''blue''' , label='''Curve of Degree ''' + str(self.degree) , )
        plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' , label='''Control Points''')
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
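A quick numeric check of the degree-1 case above: the Bezier curve through two control points is the straight segment between them, so t = 0.5 lands on the midpoint:

p0, p1 = (1.0, 2.0), (3.0, 5.0)
t = 0.5
point = ((1 - t) * p0[0] + t * p1[0], (1 - t) * p0[1] + t * p1[1])
assert point == (2.0, 3.5)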
14
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


_lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)


@dataclass
class UpperCamelCase_ ( datasets.BuilderConfig ):
    '''simple docstring'''

    UpperCAmelCase__ = None
    UpperCAmelCase__ = "utf-8"
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    UpperCAmelCase__ = True  # deprecated
    UpperCAmelCase__ = None  # deprecated
    UpperCAmelCase__ = 10 << 20  # 10MB
    UpperCAmelCase__ = None


class UpperCamelCase_ ( datasets.ArrowBasedBuilder ):
    '''simple docstring'''

    UpperCAmelCase__ = JsonConfig

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
        '''simple docstring'''
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
            A__ = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
        return datasets.DatasetInfo(features=self.config.features)

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict:
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        A__ = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(UpperCAmelCase__ , (str, list, tuple)):
            A__ = data_files
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
                A__ = [files]
            A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
        A__ = []
        for split_name, files in data_files.items():
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
                A__ = [files]
            A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
            splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files}))
        return splits

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type
                A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema)
        return pa_table

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str:
        '''simple docstring'''
        for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                    A__ = json.load(UpperCAmelCase__)

                # We keep only the field we are interested in
                A__ = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(UpperCAmelCase__ , (list, tuple)):
                    A__ = set().union(*[row.keys() for row in dataset])
                    A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
                else:
                    A__ = dataset
                A__ = pa.Table.from_pydict(UpperCAmelCase__)
                yield file_idx, self._cast_table(UpperCAmelCase__)

            # If the file has one json object per line
            else:
                with open(UpperCAmelCase__ , '''rb''') as f:
                    A__ = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ = max(self.config.chunksize // 32 , 16 << 10)
                    A__ = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        A__ = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(UpperCAmelCase__)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''')
                        try:
                            while True:
                                try:
                                    A__ = paj.read_json(
                                        io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(UpperCAmelCase__ , pa.ArrowInvalid)
                                        and "straddling" not in str(UpperCAmelCase__)
                                        or block_size > len(UpperCAmelCase__)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                                    A__ = json.load(UpperCAmelCase__)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):  # list is the only sequence type supported in JSON
                                try:
                                    A__ = set().union(*[row.keys() for row in dataset])
                                    A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
                                    A__ = pa.Table.from_pydict(UpperCAmelCase__)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(UpperCAmelCase__)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__)
                        batch_idx += 1
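This builder is normally reached through `load_dataset` rather than instantiated directly; a minimal sketch (the file path is hypothetical):

from datasets import load_dataset

ds = load_dataset("json", data_files="data.jsonl", split="train")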
14
1
from __future__ import annotations

import csv

import requests
from bsa import BeautifulSoup


def SCREAMING_SNAKE_CASE ( lowercase_ = "" ) -> dict[str, float]:
    """simple docstring"""
    A__ = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    A__ = BeautifulSoup(requests.get(lowercase_ ).text , '''html.parser''' )
    A__ = soup.find_all('''td''' , attrs='''titleColumn''' )
    A__ = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(lowercase_ , lowercase_ )
    }


def SCREAMING_SNAKE_CASE ( lowercase_ = "IMDb_Top_250_Movies.csv" ) -> None:
    """simple docstring"""
    A__ = get_imdb_top_aaa_movies()
    with open(lowercase_ , '''w''' , newline='''''' ) as out_file:
        A__ = csv.writer(lowercase_ )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )


if __name__ == "__main__":
    write_movies()
14
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""


@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
        '''simple docstring'''
        return AutoConfig.from_pretrained(UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.num_hidden_layers , 1)

    def SCREAMING_SNAKE_CASE ( self : int) ->Any:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)

    def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , 1)

    def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
        '''simple docstring'''
        with self.assertRaises(UpperCAmelCase__):
            create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
14
1
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate _lowerCamelCase : List[str] = trt.Logger(trt.Logger.WARNING) _lowerCamelCase : Dict = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) _lowerCamelCase : Optional[int] = logging.getLogger(__name__) _lowerCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--onnx_model_path""", default=None, type=str, required=True, help="""Path to ONNX model: """, ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model checkpoints and predictions will be written.""", ) # Other parameters parser.add_argument( """--tokenizer_name""", default="""""", type=str, required=True, help="""Pretrained tokenizer name or path if not the same as model_name""", ) parser.add_argument( """--version_2_with_negative""", action="""store_true""", help="""If true, the SQuAD examples contain some that do not have an answer.""", ) parser.add_argument( """--null_score_diff_threshold""", type=float, default=0.0, help="""If null_score - best_non_null is greater than the threshold predict null.""", ) parser.add_argument( """--max_seq_length""", default=384, type=int, help=( """The maximum total input sequence length after WordPiece tokenization. Sequences """ """longer than this will be truncated, and sequences shorter than this will be padded.""" ), ) parser.add_argument( """--doc_stride""", default=128, type=int, help="""When splitting up a long document into chunks, how much stride to take between chunks.""", ) parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""") parser.add_argument( """--n_best_size""", default=20, type=int, help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""", ) parser.add_argument( """--max_answer_length""", default=30, type=int, help=( """The maximum length of an answer that can be generated. 
This is needed because the start """ """and end predictions are not conditioned on one another.""" ), ) parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""") parser.add_argument( """--dataset_name""", type=str, default=None, required=True, help="""The name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--dataset_config_name""", type=str, default=None, help="""The configuration name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data.""" ) parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""") parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision instead of 32-bit""", ) parser.add_argument( """--int8""", action="""store_true""", help="""Whether to use INT8""", ) _lowerCamelCase : Tuple = parser.parse_args() if args.tokenizer_name: _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) logger.info("""Training/evaluation parameters %s""", args) _lowerCamelCase : List[Any] = args.per_device_eval_batch_size _lowerCamelCase : Optional[Any] = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties _lowerCamelCase : List[str] = True _lowerCamelCase : List[Any] = """temp_engine/bert-fp32.engine""" if args.fpaa: _lowerCamelCase : Optional[Any] = """temp_engine/bert-fp16.engine""" if args.inta: _lowerCamelCase : Any = """temp_engine/bert-int8.engine""" # import ONNX file if not os.path.exists("""temp_engine"""): os.makedirs("""temp_engine""") _lowerCamelCase : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, """rb""") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network _lowerCamelCase : Optional[int] = [network.get_input(i) for i in range(network.num_inputs)] _lowerCamelCase : Union[str, Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: _lowerCamelCase : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) _lowerCamelCase : Optional[Any] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) _lowerCamelCase : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, """wb""") as f: f.write(engine.serialize()) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = np.asarray(inputs['''input_ids'''] , dtype=np.intaa ) A__ = 
np.asarray(inputs['''attention_mask'''] , dtype=np.intaa ) A__ = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase_ ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase_ ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase_ ) # start time A__ = time.time() # Run inference context.execute_async( bindings=[int(lowercase_ ) for d_inp in d_inputs] + [int(lowercase_ ), int(lowercase_ )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowercase_ , lowercase_ , lowercase_ ) cuda.memcpy_dtoh_async(lowercase_ , lowercase_ , lowercase_ ) # Synchronize the stream and take time stream.synchronize() # end time A__ = time.time() A__ = end_time - start_time A__ = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. _lowerCamelCase : List[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. _lowerCamelCase : Optional[int] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("""Evaluation requires a dataset name""") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. _lowerCamelCase : int = raw_datasets["""validation"""].column_names _lowerCamelCase : str = """question""" if """question""" in column_names else column_names[0] _lowerCamelCase : Optional[int] = """context""" if """context""" in column_names else column_names[1] _lowerCamelCase : Tuple = """answers""" if """answers""" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). _lowerCamelCase : Optional[int] = tokenizer.padding_side == """right""" if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' F'''model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.''' ) _lowerCamelCase : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" A__ = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. A__ = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=lowercase_ , stride=args.doc_stride , return_overflowing_tokens=lowercase_ , return_offsets_mapping=lowercase_ , padding='''max_length''' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A__ = tokenized_examples.pop('''overflow_to_sample_mapping''' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A__ = [] for i in range(len(tokenized_examples['''input_ids'''] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A__ = tokenized_examples.sequence_ids(lowercase_ ) A__ = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A__ = sample_mapping[i] tokenized_examples["example_id"].append(examples['''id'''][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. A__ = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] ) ] return tokenized_examples _lowerCamelCase : int = raw_datasets["""validation"""] # Validation Feature Creation _lowerCamelCase : Optional[Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="""Running tokenizer on validation dataset""", ) _lowerCamelCase : Tuple = default_data_collator _lowerCamelCase : Union[str, Any] = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""]) _lowerCamelCase : Any = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_="eval" ) -> Tuple: """simple docstring""" A__ = postprocess_qa_predictions( examples=lowercase_ , features=lowercase_ , predictions=lowercase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase_ , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: A__ = [ {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items() ] else: A__ = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()] A__ = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowercase_ , label_ids=lowercase_ ) _lowerCamelCase : List[Any] = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""") # Evaluation! logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path) with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inference for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]: """simple docstring""" return trt.volume(engine.get_binding_shape(lowercase_ ) ) * engine.get_binding_dtype(lowercase_ ).itemsize # Allocate device memory for inputs and outputs. _lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer _lowerCamelCase : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32) _lowerCamelCase : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32) _lowerCamelCase : int = cuda.mem_alloc(h_output0.nbytes) _lowerCamelCase : List[Any] = cuda.mem_alloc(h_output1.nbytes) # Create a stream in which to copy inputs/outputs and run inference. _lowerCamelCase : int = cuda.Stream() # Evaluation logger.info("""***** Running Evaluation *****""") logger.info(F''' Num examples = {len(eval_dataset)}''') logger.info(F''' Batch size = {args.per_device_eval_batch_size}''') _lowerCamelCase : Dict = 0.0 _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[Any] = timeit.default_timer() _lowerCamelCase : List[Any] = None for step, batch in enumerate(eval_dataloader): _lowerCamelCase , _lowerCamelCase : Union[str, Any] = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream) total_time += infer_time niter += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = outputs _lowerCamelCase : Any = torch.tensor(start_logits) _lowerCamelCase : Optional[Any] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered _lowerCamelCase : Union[str, Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) _lowerCamelCase : Optional[int] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) _lowerCamelCase : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) _lowerCamelCase : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: _lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset)) _lowerCamelCase : Union[str, Any] = timeit.default_timer() - start_time logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter)) logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000)) logger.info("""Total Number of Inference = %d""", niter) _lowerCamelCase : Any = 
post_processing_function(eval_examples, eval_dataset, all_preds) _lowerCamelCase : List[str] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'''Evaluation metrics: {eval_metric}''')
14
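The raw start/end logits gathered above are turned into answer text by postprocess_qa_predictions. As a minimal sketch of the core selection step only (not the actual utility, which also handles overflowing features, offset mappings, and null answers; the n_best and length-cap parameters here are assumptions for illustration):

import numpy as np

def best_span(start_logits, end_logits, max_answer_length=30, n_best=20):
    # Consider only the top-scoring start/end candidates, as the real
    # postprocessing does, then keep the highest-scoring valid pair.
    starts = np.argsort(start_logits)[-n_best:]
    ends = np.argsort(end_logits)[-n_best:]
    best, best_score = None, -np.inf
    for s in starts:
        for e in ends:
            if s <= e < s + max_answer_length:
                score = start_logits[s] + end_logits[e]
                if score > best_score:
                    best, best_score = (int(s), int(e)), score
    return best, best_score

rng = np.random.default_rng(0)
print(best_span(rng.normal(size=128), rng.normal(size=128)))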
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size['''shortest_edge'''] * h / w) A__ = self.size['''shortest_edge'''] elif w > h: A__ = self.size['''shortest_edge'''] A__ = int(self.size['''shortest_edge'''] * w / h) else: A__ = self.size['''shortest_edge'''] A__ = self.size['''shortest_edge'''] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0] A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''')) 
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = 
image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''image_id''': 39_769, '''annotations''': target} # encode them A__ = DeformableDetrImageProcessor() A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''') # encode them A__ = DeformableDetrImageProcessor(format='''coco_panoptic''') A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) 
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify masks A__ = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
14
1
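get_expected_values above encodes the shortest_edge/longest_edge resize rule. A standalone sketch of that rule as I read it from the helper (the real image processor validates and rounds differently; the 800/1333 defaults are illustrative, the test itself uses 18/1333):

def expected_size(height: int, width: int, shortest_edge: int = 800, longest_edge: int = 1333) -> tuple:
    # Scale so the short side hits `shortest_edge`, then shrink again
    # if that would push the long side past `longest_edge`.
    scale = shortest_edge / min(height, width)
    if max(height, width) * scale > longest_edge:
        scale = longest_edge / max(height, width)
    return int(round(height * scale)), int(round(width * scale))

print(expected_size(480, 640))   # landscape image: short side pinned to 800
print(expected_size(2000, 500))  # long side capped by longest_edge instead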
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" assert column_title.isupper() A__ = 0 A__ = len(lowercase_ ) - 1 A__ = 0 while index >= 0: A__ = (ord(column_title[index] ) - 64) * pow(26 , lowercase_ ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
14
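The decoder above reads a column title as bijective base-26 with A = 1. A hypothetical inverse helper (not part of the original snippet) makes the encoding direction explicit:

def number_to_column_title(number: int) -> str:
    # Bijective base-26: subtract 1 before each divmod so that
    # 1 -> "A", 26 -> "Z", 27 -> "AA", 703 -> "AAA".
    assert number > 0
    title = []
    while number > 0:
        number, remainder = divmod(number - 1, 26)
        title.append(chr(65 + remainder))
    return "".join(reversed(title))

assert number_to_column_title(28) == "AB"
assert number_to_column_title(701) == "ZY"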
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np _lowerCamelCase : str = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 _lowerCamelCase : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut: """simple docstring""" return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut: """simple docstring""" return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2) if __name__ == "__main__": def SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" from timeit import timeit print('''Without Numpy''' ) print( timeit( '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) ) print('''With Numpy''' ) print( timeit( '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) ) benchmark()
14
1
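Beyond the two single-pair implementations above, NumPy broadcasting yields all pairwise Euclidean distances between two point sets at once; a small sketch:

import numpy as np

def pairwise_euclidean(points_a: np.ndarray, points_b: np.ndarray) -> np.ndarray:
    # (n, 1, d) - (1, m, d) broadcasts to (n, m, d); summing the squared
    # differences over the last axis yields the (n, m) distance matrix.
    diff = points_a[:, None, :] - points_b[None, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))

a = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
b = np.array([[4.0, 5.0, 6.0]])
print(pairwise_euclidean(a, b))  # [[5.196...], [8.774...]]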
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class UpperCamelCase_ : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : List[str]=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : List[str]=37 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Optional[int]=None , ) ->Optional[int]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str) ->Any: '''simple docstring''' A__ = LlamaModel(config=UpperCAmelCase__) model.to(UpperCAmelCase__) 
model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , ) ->Dict: '''simple docstring''' A__ = True A__ = LlamaModel(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , ) ->Any: '''simple docstring''' A__ = LlamaForCausalLM(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , ) ->Any: '''simple docstring''' A__ = True A__ = True A__ = LlamaForCausalLM(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() # first forward pass A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) 
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ = (LlamaForCausalLM,) if is_torch_available() else () UpperCAmelCase__ = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = LlamaModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : str) ->List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A__ = type self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(UpperCAmelCase__) A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) A__ = LlamaForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = '''single_label_classification''' A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(UpperCAmelCase__) A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) A__ = LlamaForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = '''multi_label_classification''' A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(UpperCAmelCase__) A__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) A__ = LlamaForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) 
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''') def SCREAMING_SNAKE_CASE ( self : Any) ->int: '''simple docstring''' pass @parameterized.expand([('''linear''',), ('''dynamic''',)]) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[int]) ->List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ids_tensor([1, 10] , config.vocab_size) A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights A__ = LlamaModel(UpperCAmelCase__) original_model.to(UpperCAmelCase__) original_model.eval() A__ = original_model(UpperCAmelCase__).last_hidden_state A__ = original_model(UpperCAmelCase__).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights A__ = {'''type''': scaling_type, '''factor''': 10.0} A__ = LlamaModel(UpperCAmelCase__) scaled_model.to(UpperCAmelCase__) scaled_model.eval() A__ = scaled_model(UpperCAmelCase__).last_hidden_state A__ = scaled_model(UpperCAmelCase__).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5)) else: self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''') @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''') A__ = model(torch.tensor([input_ids])) # Expected mean on dim = -1 A__ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]]) torch.testing.assert_close(out.mean(-1) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2) # slicing logits[0, 0, 0:30] # fmt: off A__ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,]) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''') @slow def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''') A__ = model(torch.tensor(UpperCAmelCase__)) # Expected mean on dim = -1 A__ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]]) torch.testing.assert_close(out.mean(-1) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2) # slicing logits[0, 0, 0:30] # fmt: off A__ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, 
-0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273]) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5) @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''') @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: '''simple docstring''' A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''') A__ = model(torch.tensor(UpperCAmelCase__)) # Expected mean on dim = -1 A__ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]]) torch.testing.assert_close(out.mean(-1) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2) # slicing logits[0, 0, 0:30] # fmt: off A__ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513]) # fmt: on torch.testing.assert_close(out.mean(-1) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2) @unittest.skip( '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''') @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: '''simple docstring''' A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''') A__ = model(torch.tensor(UpperCAmelCase__)) A__ = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.float32) torch.testing.assert_close(out.mean(-1) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2) # fmt: off A__ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312]) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5) @unittest.skip('''Model is currently gated''') @slow def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]: '''simple docstring''' A__ = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' A__ = '''Simply put, the theory of relativity states that ''' A__ = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''') A__ = tokenizer.encode(UpperCAmelCase__ , return_tensors='''pt''') A__ = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCAmelCase__) # greedy generation outputs A__ = model.generate(UpperCAmelCase__ , max_new_tokens=64 , top_p=UpperCAmelCase__ , temperature=1 , do_sample=UpperCAmelCase__) A__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase__) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__)
14
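The parameterized test above checks that rope_scaling={'type': 'linear', 'factor': 10.0} changes hidden states, except for dynamic scaling on short inputs. The idea behind linear scaling, sketched independently of the transformers internals (the function name and base=10000 default are illustrative), is simply to divide position indices before building the rotary angles:

import torch

def rope_angles(seq_len: int, dim: int, base: float = 10000.0, factor: float = 1.0) -> torch.Tensor:
    # Standard RoPE frequencies; linear scaling stretches positions by `factor`
    # so a longer sequence reuses the angle range the model was trained on.
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float() / factor
    return torch.outer(positions, inv_freq)  # shape (seq_len, dim // 2)

# Every other scaled position of a 2x-longer sequence lands on an original angle.
assert torch.allclose(rope_angles(16, 16, factor=2.0)[::2], rope_angles(8, 16))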
from ...processing_utils import ProcessorMixin class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''SpeechT5FeatureExtractor''' UpperCAmelCase__ = '''SpeechT5Tokenizer''' def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]: '''simple docstring''' super().__init__(UpperCAmelCase__ , UpperCAmelCase__) def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]: '''simple docstring''' A__ = kwargs.pop('''audio''' , UpperCAmelCase__) A__ = kwargs.pop('''text''' , UpperCAmelCase__) A__ = kwargs.pop('''text_target''' , UpperCAmelCase__) A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__) A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''') if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''') if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''') if audio is not None: A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__) elif text is not None: A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__) else: A__ = None if audio_target is not None: A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_values'''] elif text_target is not None: A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_ids'''] else: A__ = None if inputs is None: return targets if targets is not None: A__ = labels A__ = targets.get('''attention_mask''') if decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]: '''simple docstring''' A__ = kwargs.pop('''input_values''' , UpperCAmelCase__) A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__) A__ = kwargs.pop('''labels''' , UpperCAmelCase__) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''') if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''') if input_values is not None: A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__) elif input_ids is not None: A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__) else: A__ = None if labels is not None: if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]): A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_ids'''] else: A__ = self.feature_extractor.feature_size A__ = self.feature_extractor.num_mel_bins A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__) A__ = feature_size_hack A__ = targets['''input_values'''] else: A__ = None if inputs is None: return targets if targets is not None: A__ = labels A__ = targets.get('''attention_mask''') if 
decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
14
1
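A usage sketch for the processor above; the checkpoint name and the 16 kHz rate are assumptions for illustration, and from_pretrained downloads the checkpoint's tokenizer and feature-extractor files:

import numpy as np
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")  # assumed checkpoint
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
# `audio` feeds the feature extractor and `text_target` the tokenizer; passing
# `audio` together with `text` would raise, as the __call__ above shows.
inputs = processor(audio=waveform, sampling_rate=16_000, text_target="hello world")
print(inputs.keys())  # input_values plus labels (and decoder_attention_mask if present)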
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" if len(lowercase_ ) != len(lowercase_ ): raise ValueError('''String lengths must match!''' ) A__ = 0 for char1, char2 in zip(lowercase_ , lowercase_ ): if char1 != char2: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
14
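For integers, the same distance can be computed without building strings: XOR marks the differing bit positions and int.bit_count() (Python 3.10+) counts them. A quick sketch alongside the string version above:

def hamming_distance_int(a: int, b: int) -> int:
    # XOR leaves a 1 exactly where a and b differ; bit_count() counts the 1s.
    return (a ^ b).bit_count()

assert hamming_distance_int(0b1011, 0b1001) == 1
assert hamming_distance_int(0, 255) == 8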
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Tuple = logging.get_logger(__name__) _lowerCamelCase : str = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git_vision_model''' def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = hidden_size A__ = intermediate_size A__ = num_hidden_layers A__ = num_attention_heads A__ = num_channels A__ = patch_size A__ = image_size A__ = initializer_range A__ = attention_dropout A__ = layer_norm_eps A__ = hidden_act @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the vision config dict if we are loading from GITConfig if config_dict.get('''model_type''') == "git": A__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git''' def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any: '''simple docstring''' super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__) if vision_config is None: A__ = {} logger.info('''vision_config is None. 
initializing the GitVisionConfig with default values.''') A__ = GitVisionConfig(**UpperCAmelCase__) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = tie_word_embeddings A__ = num_image_with_embedding A__ = bos_token_id A__ = eos_token_id def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.vision_config.to_dict() A__ = self.__class__.model_type return output
14
1
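The to_dict override above follows a common nested-config pattern: deep-copy the instance dict, then replace the embedded sub-config object by its own dict and record model_type. A dependency-free sketch of the pattern with toy classes (not the transformers base class):

import copy

class VisionConfig:
    def __init__(self, hidden_size: int = 768):
        self.hidden_size = hidden_size

    def to_dict(self) -> dict:
        return copy.deepcopy(self.__dict__)

class Config:
    model_type = "toy"

    def __init__(self, vision_config=None):
        # Fall back to defaults when no sub-config is given, as GitConfig does.
        self.vision_config = vision_config or VisionConfig()

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()  # serialize the nested object
        output["model_type"] = self.model_type
        return output

print(Config().to_dict())  # {'vision_config': {'hidden_size': 768}, 'model_type': 'toy'}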
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration _lowerCamelCase : Tuple = pytest.mark.integration _lowerCamelCase : Union[str, Any] = {"""comet"""} _lowerCamelCase : Tuple = importlib.util.find_spec("""fairseq""") is not None _lowerCamelCase : Union[str, Any] = {"""code_eval"""} _lowerCamelCase : List[str] = os.name == """nt""" _lowerCamelCase : Union[str, Any] = {"""bertscore""", """frugalscore""", """perplexity"""} _lowerCamelCase : Union[str, Any] = importlib.util.find_spec("""transformers""") is not None def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" @wraps(lowercase_ ) def wrapper(self , lowercase_ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self , lowercase_ ) return wrapper def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]: """simple docstring""" @wraps(lowercase_ ) def wrapper(self , lowercase_ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self , lowercase_ ) return wrapper def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]: """simple docstring""" @wraps(lowercase_ ) def wrapper(self , lowercase_ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self , lowercase_ ) return wrapper def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" A__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) @local class UpperCamelCase_ ( parameterized.TestCase ): '''simple docstring''' UpperCAmelCase__ = {} UpperCAmelCase__ = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''') @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''') def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Tuple) ->Any: '''simple docstring''' A__ = '''[...]''' A__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , UpperCAmelCase__)).module_path) A__ = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCAmelCase__) # check parameters A__ = inspect.signature(metric._compute).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs # run doctest with self.patch_intensive_calls(UpperCAmelCase__ , metric_module.__name__): with self.use_local_metrics(): try: A__ = doctest.testmod(UpperCAmelCase__ , verbose=UpperCAmelCase__ , raise_on_error=UpperCAmelCase__) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0) self.assertGreater(results.attempted , 1) @slow def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : List[str]) ->List[Any]: '''simple docstring''' A__ = '''[...]''' A__ = importlib.import_module( 
datasets.load.metric_module_factory(os.path.join('''metrics''' , UpperCAmelCase__)).module_path) # run doctest with self.use_local_metrics(): A__ = doctest.testmod(UpperCAmelCase__ , verbose=UpperCAmelCase__ , raise_on_error=UpperCAmelCase__) self.assertEqual(results.failed , 0) self.assertGreater(results.attempted , 1) @contextmanager def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]) ->int: '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCAmelCase__): yield else: yield @contextmanager def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' def load_local_metric(UpperCAmelCase__ : Dict , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : int): return load_metric(os.path.join('''metrics''' , UpperCAmelCase__) , *UpperCAmelCase__ , **UpperCAmelCase__) with patch('''datasets.load_metric''') as mock_load_metric: A__ = load_local_metric yield @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : List[str]) ->Optional[Any]: '''simple docstring''' def wrapper(UpperCAmelCase__ : Dict): A__ = contextmanager(UpperCAmelCase__) A__ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : str) ->Any: '''simple docstring''' assert len(input_dict['''input_ids''']) == 2 return np.array([1.03, 1.04]) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: A__ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple: """simple docstring""" import torch def bert_cos_score_idf(lowercase_ , lowercase_ , *lowercase_ , **lowercase_ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(lowercase_ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: A__ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" def load_from_checkpoint(lowercase_ ): class UpperCamelCase_ : '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int) ->Tuple: '''simple docstring''' assert len(UpperCAmelCase__) == 2 A__ = [0.19, 0.92] return scores, sum(UpperCAmelCase__) / len(UpperCAmelCase__) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: A__ = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: A__ = load_from_checkpoint yield def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" A__ = load_metric(os.path.join('''metrics''' , '''seqeval''' ) ) A__ = 
'''ERROR''' A__ = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(lowercase_ , match=re.escape(lowercase_ ) ): metric.compute(predictions=[] , references=[] , scheme=lowercase_ )
14
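register_intensive_calls_patcher above combines a class-level registry with contextmanager-wrapped unittest.mock patches. A self-contained sketch of the same pattern on a toy target:

from contextlib import contextmanager
from unittest.mock import patch

PATCHERS = {}

def register_patcher(name):
    def wrapper(func):
        PATCHERS[name] = contextmanager(func)  # store a ready-to-use context manager
        return func
    return wrapper

@register_patcher("json")
def _patch_dumps(tag):
    # Replace json.dumps for the duration of the block.
    with patch("json.dumps") as mock_dumps:
        mock_dumps.return_value = f"<mocked:{tag}>"
        yield

import json
with PATCHERS["json"]("demo"):
    print(json.dumps({"a": 1}))  # <mocked:demo>
print(json.dumps({"a": 1}))     # real output again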
import requests from bs4 import BeautifulSoup def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = BeautifulSoup(requests.get(lowercase_ , params=lowercase_ ).content , '''html.parser''' ) A__ = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A__ = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": _lowerCamelCase : Optional[Any] = { """title""": ( """Precisely geometry controlled microsupercapacitors for ultrahigh areal """ """capacitance, volumetric capacitance, and energy density""" ), """journal""": """Chem. Mater.""", """volume""": 30, """pages""": """3979-3990""", """year""": 2018, """hl""": """en""", } print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
14
1
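requests assembles the Google Scholar query string from the params dict above; a prepared request shows the final URL without any network traffic:

import requests

params = {"title": "example paper", "year": 2018, "hl": "en"}
prepared = requests.Request(
    "GET", "https://scholar.google.com/scholar_lookup", params=params
).prepare()
# URL-encoded query string, e.g. ...?title=example+paper&year=2018&hl=en
print(prepared.url)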
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" A__ = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.float32 ) # load LoRA weight from .safetensors A__ = load_file(lowercase_ ) A__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A__ = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) A__ = pipeline.text_encoder else: A__ = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) A__ = pipeline.unet # find the target layer A__ = layer_infos.pop(0 ) while len(lowercase_ ) > -1: try: A__ = curr_layer.__getattr__(lowercase_ ) if len(lowercase_ ) > 0: A__ = layer_infos.pop(0 ) elif len(lowercase_ ) == 0: break except Exception: if len(lowercase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: A__ = layer_infos.pop(0 ) A__ = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(lowercase_ ) else: pair_keys.append(lowercase_ ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 ) A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 ) else: A__ = state_dict[pair_keys[0]].to(torch.float32 ) A__ = state_dict[pair_keys[1]].to(torch.float32 ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ) # update visited list for item in pair_keys: visited.append(lowercase_ ) return pipeline if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") _lowerCamelCase : Tuple = parser.parse_args() _lowerCamelCase : List[Any] = args.base_model_path _lowerCamelCase : Optional[int] = args.checkpoint_path _lowerCamelCase : Dict = args.dump_path _lowerCamelCase : Optional[Any] = args.lora_prefix_unet _lowerCamelCase : Optional[int] = args.lora_prefix_text_encoder _lowerCamelCase : List[Any] = args.alpha _lowerCamelCase : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) _lowerCamelCase : Tuple = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
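The per-layer update the script applies is the usual low-rank merge W <- W + alpha * (up @ down). A toy, self-contained sketch of that rule on random tensors (all names here are illustrative, not part of the script):

import torch

# Toy LoRA merge: W <- W + alpha * (up @ down), the same update the
# conversion script applies to each linear/attention weight it visits.
torch.manual_seed(0)
out_dim, in_dim, rank, alpha = 8, 8, 2, 0.75

weight = torch.randn(out_dim, in_dim)  # base weight W0
lora_up = torch.randn(out_dim, rank)   # "lora_up" factor
lora_down = torch.randn(rank, in_dim)  # "lora_down" factor

merged = weight + alpha * torch.mm(lora_up, lora_down)
assert merged.shape == weight.shape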
14
1
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-wise records into one list per column, as floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize every column; weight 0 inverts the score (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one final score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
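A worked example for the pipeline above, run after the definitions (weight 0 marks a column to minimize, weight 1 a column to maximize); the scores below follow directly from the min-max normalization:

# Three vehicles as [price, comfort, year]: price low (weight 0),
# year high (weight 1), comfort weighted 0 here to keep the arithmetic visible.
vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
scored = procentual_proximity(vehicles, [0, 0, 1])
print(scored)
# [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]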
14
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
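What the parametrized test asserts can be seen directly: get_imports statically parses a module and skips imports guarded by try/except. A small standalone sketch (the temp-file handling is illustrative):

import tempfile

from transformers.dynamic_module_utils import get_imports

source = "import os\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write(source)
print(get_imports(f.name))  # ['os'] -- the guarded `bar` import is skipped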
14
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
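A sketch of how the template is meant to be used (assuming a datasets release that still ships task templates; the label names are made up): align_with_features copies the dataset's concrete ClassLabel into the template's label schema.

from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']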
14
def nor_gate(input_1: int, input_2: int) -> int:
    """A NOR gate outputs 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
14
1
def longest_distance(graph):
    """Kahn's algorithm over a DAG, tracking the longest path (counted in vertices) ending at each node."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)  # prints 5, e.g. the path 0 -> 2 -> 5 -> 6 -> 7
14
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
14
1
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )

    args = parser.parse_args()

    main(args)
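The "magnitude" branch boils down to masking small weights. A dependency-free sketch of that idea (an illustration only; the real MagnitudeBinarizer treats `threshold` as the fraction of weights to keep, not an absolute cutoff as below):

import torch

# Illustrative magnitude pruning: zero every weight whose absolute value
# falls below a cutoff -- the shape of operation the "magnitude" branch
# above delegates to MagnitudeBinarizer.
torch.manual_seed(0)
tensor = torch.randn(4, 4)
cutoff = 0.5

mask = (tensor.abs() >= cutoff).to(tensor.dtype)
pruned = tensor * mask
print(f"kept {int(mask.sum())} of {mask.numel()} weights")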
14
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.assertIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) ,
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
14
1
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_lock_path(tmpdir):
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
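The behavior under test, as a standalone sketch (assumes a datasets release that still vendors datasets.utils.filelock): a second lock on the same file cannot be acquired while the first is held, and raises Timeout.

import os
import tempfile

from datasets.utils.filelock import FileLock, Timeout

lock_path = os.path.join(tempfile.gettempdir(), "demo.lock")
outer = FileLock(lock_path)
inner = FileLock(lock_path)

with outer.acquire():
    try:
        inner.acquire(timeout=0.01)  # held above -> should time out
    except Timeout:
        print("lock is busy, as expected")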
14
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by value/weight ratio, splitting the last one."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
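A quick worked example for the greedy routine above, run after the definition: with capacity 50 the ratios 6, 5, 4 mean items 0 and 1 are taken whole and two-thirds of item 2 fills the rest, so 60 + 100 + (2/3)*120 = 240.

values = [60, 100, 120]
weights = [10, 20, 30]
max_value, fractions = fractional_knapsack(values, weights, 50)
print(max_value)  # 240.0
print(fractions)  # [1, 1, 0.6666666666666666]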
14
1
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = 42 class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' @register_to_config def __init__( self : Dict , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 20 , UpperCAmelCase__ : int = 768 , UpperCAmelCase__ : str=77 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : str = "silu" , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "linear" , UpperCAmelCase__ : Optional[str] = "prd" , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , ) ->List[str]: '''simple docstring''' super().__init__() A__ = num_attention_heads A__ = attention_head_dim A__ = num_attention_heads * attention_head_dim A__ = additional_embeddings A__ = time_embed_dim or inner_dim A__ = embedding_proj_dim or embedding_dim A__ = clip_embed_dim or embedding_dim A__ = Timesteps(UpperCAmelCase__ , UpperCAmelCase__ , 0) A__ = TimestepEmbedding(UpperCAmelCase__ , UpperCAmelCase__ , out_dim=UpperCAmelCase__ , act_fn=UpperCAmelCase__) A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__) if embedding_proj_norm_type is None: A__ = None elif embedding_proj_norm_type == "layer": A__ = nn.LayerNorm(UpperCAmelCase__) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""") A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__) if encoder_hid_proj_type is None: A__ = None elif encoder_hid_proj_type == "linear": A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""") A__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCAmelCase__)) if added_emb_type == "prd": A__ = nn.Parameter(torch.zeros(1 , 1 , UpperCAmelCase__)) elif added_emb_type is None: A__ = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""") A__ = nn.ModuleList( [ BasicTransformerBlock( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , dropout=UpperCAmelCase__ , activation_fn='''gelu''' , attention_bias=UpperCAmelCase__ , ) for d in range(UpperCAmelCase__) ]) if norm_in_type == "layer": A__ = nn.LayerNorm(UpperCAmelCase__) elif norm_in_type is None: A__ = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""") A__ = nn.LayerNorm(UpperCAmelCase__) A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__) A__ = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0) causal_attention_mask.triu_(1) A__ = causal_attention_mask[None, ...] 
self.register_buffer('''causal_attention_mask''' , UpperCAmelCase__ , persistent=UpperCAmelCase__) A__ = nn.Parameter(torch.zeros(1 , UpperCAmelCase__)) A__ = nn.Parameter(torch.zeros(1 , UpperCAmelCase__)) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def SCREAMING_SNAKE_CASE ( self : int) ->Dict[str, AttentionProcessor]: '''simple docstring''' A__ = {} def fn_recursive_add_processors(UpperCAmelCase__ : str , UpperCAmelCase__ : torch.nn.Module , UpperCAmelCase__ : Dict[str, AttentionProcessor]): if hasattr(UpperCAmelCase__ , '''set_processor'''): A__ = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCAmelCase__ , UpperCAmelCase__) return processors for name, module in self.named_children(): fn_recursive_add_processors(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) return processors def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]) ->Optional[Any]: '''simple docstring''' A__ = len(self.attn_processors.keys()) if isinstance(UpperCAmelCase__ , UpperCAmelCase__) and len(UpperCAmelCase__) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(UpperCAmelCase__)} does not match the""" f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""") def fn_recursive_attn_processor(UpperCAmelCase__ : str , UpperCAmelCase__ : torch.nn.Module , UpperCAmelCase__ : List[str]): if hasattr(UpperCAmelCase__ , '''set_processor'''): if not isinstance(UpperCAmelCase__ , UpperCAmelCase__): module.set_processor(UpperCAmelCase__) else: module.set_processor(processor.pop(f"""{name}.processor""")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCAmelCase__ , UpperCAmelCase__) for name, module in self.named_children(): fn_recursive_attn_processor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: '''simple docstring''' self.set_attn_processor(AttnProcessor()) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[torch.Tensor, float, int] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.BoolTensor] = None , UpperCAmelCase__ : bool = True , ) ->Tuple: '''simple docstring''' A__ = hidden_states.shape[0] A__ = timestep if not torch.is_tensor(UpperCAmelCase__): A__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device) elif torch.is_tensor(UpperCAmelCase__) and len(timesteps.shape) == 0: A__ = timesteps[None].to(hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A__ = timesteps * torch.ones(UpperCAmelCase__ , dtype=timesteps.dtype , device=timesteps.device) A__ = self.time_proj(UpperCAmelCase__) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
A__ = timesteps_projected.to(dtype=self.dtype) A__ = self.time_embedding(UpperCAmelCase__) if self.embedding_proj_norm is not None: A__ = self.embedding_proj_norm(UpperCAmelCase__) A__ = self.embedding_proj(UpperCAmelCase__) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: A__ = self.encoder_hidden_states_proj(UpperCAmelCase__) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''') A__ = self.proj_in(UpperCAmelCase__) A__ = self.positional_embedding.to(hidden_states.dtype) A__ = [] A__ = 0 if encoder_hidden_states is not None: additional_embeds.append(UpperCAmelCase__) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape) == 2: A__ = proj_embeddings[:, None, :] if len(hidden_states.shape) == 2: A__ = hidden_states[:, None, :] A__ = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: A__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCAmelCase__ , -1 , -1) additional_embeds.append(UpperCAmelCase__) A__ = torch.cat( UpperCAmelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens A__ = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: A__ = F.pad( UpperCAmelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) A__ = hidden_states + positional_embeddings if attention_mask is not None: A__ = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 A__ = F.pad(UpperCAmelCase__ , (0, self.additional_embeddings) , value=0.0) A__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype) A__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0) if self.norm_in is not None: A__ = self.norm_in(UpperCAmelCase__) for block in self.transformer_blocks: A__ = block(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = self.norm_out(UpperCAmelCase__) if self.prd_embedding is not None: A__ = hidden_states[:, -1] else: A__ = hidden_states[:, additional_embeddings_len:] A__ = self.proj_to_clip_embeddings(UpperCAmelCase__) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Any) ->Dict: '''simple docstring''' A__ = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
14
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER and write the metrics (plus optional per-example logs) to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lowercase the transcription and strip the characters ignored during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
14
1
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase__ : Optional[int]=0.01 , UpperCAmelCase__ : Optional[Any]=1_000) ->Optional[int]: '''simple docstring''' A__ = p_stop A__ = max_length def __iter__( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = 0 A__ = False while not stop and count < self.max_length: yield count count += 1 A__ = random.random() < self.p_stop class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Any=True) ->Optional[Any]: '''simple docstring''' A__ = [ BatchSamplerShard(UpperCAmelCase__ , 2 , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) for i in range(2) ] A__ = [list(UpperCAmelCase__) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(UpperCAmelCase__) for shard in batch_sampler_shards] , [len(UpperCAmelCase__) for e in expected]) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Any: '''simple docstring''' A__ = BatchSampler(range(24) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(24) , batch_size=3 , drop_last=UpperCAmelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A__ = BatchSampler(range(21) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(21) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A__ = BatchSampler(range(22) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(22) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
A__ = BatchSampler(range(20) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(20) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) # Check the shards when the dataset is very small. A__ = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) A__ = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: '''simple docstring''' A__ = BatchSampler(range(24) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) A__ = BatchSampler(range(24) , batch_size=4 , drop_last=UpperCAmelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size. A__ = BatchSampler(range(22) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) A__ = BatchSampler(range(22) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A__ = BatchSampler(range(21) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) A__ = BatchSampler(range(21) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) # Check the shards when the dataset is very small. 
A__ = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) A__ = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' A__ = BatchSampler(range(24) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(24) , batch_size=3 , drop_last=UpperCAmelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A__ = BatchSampler(range(21) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(21) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A__ = BatchSampler(range(22) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(22) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. A__ = BatchSampler(range(20) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(20) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is very small. 
A__ = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCAmelCase__) A__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: '''simple docstring''' A__ = BatchSampler(range(24) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(24) , batch_size=4 , drop_last=UpperCAmelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size. A__ = BatchSampler(range(22) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(22) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A__ = BatchSampler(range(21) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) A__ = BatchSampler(range(21) , batch_size=4 , drop_last=UpperCAmelCase__) A__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__) # Check the shards when the dataset is very small. 
        A__ = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCAmelCase__)
        A__ = [[[0, 1]], []]
        self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__)

        A__ = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCAmelCase__)
        A__ = [[], []]
        self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
        '''simple docstring'''
        A__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        A__ = [BatchSamplerShard(UpperCAmelCase__ , 2 , UpperCAmelCase__ , even_batches=UpperCAmelCase__) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]) , 3)
        self.assertEqual(len(batch_sampler_shards[1]) , 2)

        self.assertListEqual(list(batch_sampler_shards[0]) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]) , [[3, 4], [9, 10, 11]])

    def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[str]=False) ->Optional[Any]:
        '''simple docstring'''
        random.seed(UpperCAmelCase__)
        A__ = list(UpperCAmelCase__)
        A__ = [
            IterableDatasetShard(
                UpperCAmelCase__ , batch_size=UpperCAmelCase__ , drop_last=UpperCAmelCase__ , num_processes=UpperCAmelCase__ , process_index=UpperCAmelCase__ , split_batches=UpperCAmelCase__ , )
            for i in range(UpperCAmelCase__)
        ]
        A__ = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(UpperCAmelCase__)
            iterable_dataset_lists.append(list(UpperCAmelCase__))

        A__ = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        A__ = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__))
        self.assertTrue(len(UpperCAmelCase__) % shard_batch_size == 0)

        A__ = []
        for idx in range(0 , len(UpperCAmelCase__) , UpperCAmelCase__):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(UpperCAmelCase__) < len(UpperCAmelCase__):
                reference += reference
        self.assertListEqual(UpperCAmelCase__ , reference[: len(UpperCAmelCase__)])

    def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
        '''simple docstring'''
        A__ = 42
        A__ = RandomIterableDataset()

        self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__)
        self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__)
        self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__)
        self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__)

        # Edge case with a very small dataset
        A__ = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__)
        self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__)
        self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__)
        self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
        '''simple docstring'''
        A__ = BatchSampler(range(16) , batch_size=4 , drop_last=UpperCAmelCase__)
        A__ = SkipBatchSampler(UpperCAmelCase__ , 2)
        self.assertListEqual(list(UpperCAmelCase__) , [[8, 9, 10, 11], [12, 13, 14, 15]])

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
        '''simple docstring'''
        A__ = SkipDataLoader(list(range(16)) , batch_size=4 , skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]])

    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
        '''simple docstring'''
        A__ = DataLoader(list(range(16)) , batch_size=4)
        A__ = skip_first_batches(UpperCAmelCase__ , num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]])

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
        '''simple docstring'''
        A__ = DataLoaderShard(list(range(16)) , batch_size=4)
        for idx, _ in enumerate(UpperCAmelCase__):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(UpperCAmelCase__):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3)

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
        '''simple docstring'''
        Accelerator()
        A__ = DataLoaderDispatcher(range(16) , batch_size=4)
        for idx, _ in enumerate(UpperCAmelCase__):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(UpperCAmelCase__):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3)
14
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_lowerCamelCase : int = {
    """configuration_blip""": [
        """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BlipConfig""",
        """BlipTextConfig""",
        """BlipVisionConfig""",
    ],
    """processing_blip""": ["""BlipProcessor"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Tuple = ["""BlipImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BlipModel""",
        """BlipPreTrainedModel""",
        """BlipForConditionalGeneration""",
        """BlipForQuestionAnswering""",
        """BlipVisionModel""",
        """BlipTextModel""",
        """BlipForImageTextRetrieval""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Optional[Any] = [
        """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFBlipModel""",
        """TFBlipPreTrainedModel""",
        """TFBlipForConditionalGeneration""",
        """TFBlipForQuestionAnswering""",
        """TFBlipVisionModel""",
        """TFBlipTextModel""",
        """TFBlipForImageTextRetrieval""",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
1
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def SCREAMING_SNAKE_CASE ( ) -> tuple[list[int], int]:
    """simple docstring"""
    A__ = [randint(-1_000 , 1_000 ) for i in range(10 )]
    A__ = randint(-5_000 , 5_000 )
    return (arr, r)


_lowerCamelCase : Tuple = make_dataset()


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(lowercase_ , 3 ):
        if sum(lowercase_ ) == target:
            return tuple(sorted(lowercase_ ) )
    return (0, 0, 0)


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    A__ = len(lowercase_ )
    for i in range(n - 1 ):
        A__ , A__ = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def SCREAMING_SNAKE_CASE ( ) -> tuple[float, float]:
    """simple docstring"""
    A__ = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    A__ = '''
triplet_sum1(*dataset)
'''
    A__ = '''
triplet_sum2(*dataset)
'''
    A__ = repeat(setup=lowercase_ , stmt=lowercase_ , repeat=5 , number=10_000 )
    A__ = repeat(setup=lowercase_ , stmt=lowercase_ , repeat=5 , number=10_000 )
    return (min(lowercase_ ), min(lowercase_ ))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    _lowerCamelCase : Optional[Any] = solution_times()
    print(F'''The time for naive implementation is {times[0]}.''')
    print(F'''The time for optimized implementation is {times[1]}.''')
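For readability, a minimal de-obfuscated sketch of the two-pointer search used by the sorted variant above; the name `three_sum` and the sample input are illustrative, not part of the original file.

def three_sum(arr: list[int], target: int) -> tuple[int, int, int]:
    # Sort once, then for each pivot walk two pointers inward: O(n^2) overall,
    # versus O(n^3) for the permutation-based brute force above.
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            total = arr[i] + arr[left] + arr[right]
            if total == target:
                return (arr[i], arr[left], arr[right])
            if total < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)

assert three_sum([2, 7, 4, 0, 9, 5, 1, 3], 18) == (2, 7, 9)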
14
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMSNModel""",
        """ViTMSNForImageClassification""",
        """ViTMSNPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
1
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)

_lowerCamelCase : List[Any] = {"""vocab_file""": """vocab.txt"""}

_lowerCamelCase : List[Any] = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}

_lowerCamelCase : List[str] = {
    """openbmb/cpm-ant-10b""": 1024,
}


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
    """simple docstring"""
    A__ = collections.OrderedDict()
    with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as reader:
        A__ = reader.readlines()
    for index, token in enumerate(lowercase_ ):
        A__ = token.rstrip('''\n''' )
        A__ = index
    return vocab


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    def __init__( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : Union[str, Any]=200) ->List[str]:
        '''simple docstring'''
        A__ = vocab
        A__ = unk_token
        A__ = max_input_chars_per_word

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Optional[int]) ->Optional[int]:
        '''simple docstring'''
        A__ = list(UpperCAmelCase__)
        if len(UpperCAmelCase__) > self.max_input_chars_per_word:
            return [self.unk_token]

        A__ = 0
        A__ = []
        while start < len(UpperCAmelCase__):
            A__ = len(UpperCAmelCase__)
            A__ = None
            while start < end:
                A__ = ''''''.join(chars[start:end])
                if substr in self.vocab:
                    A__ = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(UpperCAmelCase__)
                A__ = end

        return sub_tokens


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
    UpperCAmelCase__ = False

    def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]="<d>" , UpperCAmelCase__ : Union[str, Any]="</d>" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : Optional[int]="<pad>" , UpperCAmelCase__ : Dict="<unk>" , UpperCAmelCase__ : Optional[Any]="</n>" , UpperCAmelCase__ : List[Any]="</_>" , UpperCAmelCase__ : Optional[Any]="left" , **UpperCAmelCase__ : Union[str, Any] , ) ->Dict:
        '''simple docstring'''
        requires_backends(self , ['''jieba'''])
        super().__init__(
            bod_token=UpperCAmelCase__ , eod_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , line_token=UpperCAmelCase__ , space_token=UpperCAmelCase__ , padding_side=UpperCAmelCase__ , **UpperCAmelCase__ , )
        A__ = bod_token
        A__ = eod_token
        A__ = load_vocab(UpperCAmelCase__)
        A__ = self.encoder[space_token]
        A__ = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        A__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCAmelCase__: x[1]))
        A__ = {v: k for k, v in self.encoder.items()}
        A__ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
        '''simple docstring'''
        return self.encoder[self.bod_token]

    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
        '''simple docstring'''
        return self.encoder[self.eod_token]

    @property
    def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
        '''simple docstring'''
        return self.encoder["\n"]

    @property
    def SCREAMING_SNAKE_CASE ( self : int) ->int:
        '''simple docstring'''
        return len(self.encoder)

    def SCREAMING_SNAKE_CASE ( self : Dict) ->str:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder)

    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any) ->Union[str, Any]:
        '''simple docstring'''
        A__ = []
        for x in jieba.cut(UpperCAmelCase__ , cut_all=UpperCAmelCase__):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCAmelCase__))
        return output_tokens

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Dict) ->List[str]:
        '''simple docstring'''
        A__ = [i for i in token_ids if i >= 0]
        A__ = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(UpperCAmelCase__ , **UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Union[str, Any]) ->Tuple:
        '''simple docstring'''
        return token in self.encoder

    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[str]) ->str:
        '''simple docstring'''
        return "".join(UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : int) ->Union[str, Any]:
        '''simple docstring'''
        return self.encoder.get(UpperCAmelCase__ , self.encoder.get(self.unk_token))

    def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[str]) ->Any:
        '''simple docstring'''
        return self.decoder.get(UpperCAmelCase__ , self.unk_token)

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]:
        '''simple docstring'''
        if os.path.isdir(UpperCAmelCase__):
            A__ = os.path.join(
                UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        else:
            A__ = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        A__ = 0
        if " " in self.encoder:
            A__ = self.encoder[''' ''']
            del self.encoder[" "]
        if "\n" in self.encoder:
            A__ = self.encoder['''\n''']
            del self.encoder["\n"]
        A__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCAmelCase__: x[1]))
        with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''')
                    A__ = token_index
                writer.write(token + '''\n''')
                index += 1
        return (vocab_file,)

    def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] = None) ->List[int]:
        '''simple docstring'''
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False) ->List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__)

        if token_ids_a is not None:
            return [1] + ([0] * len(UpperCAmelCase__)) + [1] + ([0] * len(UpperCAmelCase__))
        return [1] + ([0] * len(UpperCAmelCase__))
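The inner helper class above implements greedy longest-match segmentation over the vocabulary. A minimal standalone sketch of that loop with a toy vocabulary; the name `greedy_longest_match` and the vocab entries are illustrative assumptions, not the model's real vocabulary.

def greedy_longest_match(word: str, vocab: set[str], unk: str = "<unk>") -> list[str]:
    # At each position, take the longest vocabulary entry that matches;
    # fall back to a single <unk> character when nothing matches.
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        match = None
        while start < end:
            if word[start:end] in vocab:
                match = word[start:end]
                break
            end -= 1
        if match is None:
            tokens.append(unk)
            start += 1
        else:
            tokens.append(match)
            start = end
    return tokens

assert greedy_longest_match("unhappy", {"un", "happy", "h"}) == ["un", "happy"]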
14
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]":
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )

    A__ = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    A__ = 1
    if upper_limit > 0:
        A__ = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(lowercase_ ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
    print("""\n*** Enter -1 at any time to quit ***""")
    print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
    try:
        while True:
            _lowerCamelCase : List[Any] = int(input().strip())

            if N < 0:
                print("""\n********* Goodbye!! ************""")
                break
            else:
                print(F'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print("""Try another upper limit for the sequence: """, end="""""")
    except (NameError, ValueError):
        print("""\n********* Invalid input, goodbye! ************\n""")

    import doctest

    doctest.testmod()
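As a quick sanity check on the recurrence, a de-obfuscated sketch; the name `catalan_numbers` mirrors the call at the bottom of the file, and the expected values are the standard Catalan sequence.

def catalan_numbers(upper_limit: int) -> list[int]:
    # C(0) = C(1) = 1; C(i) = sum over j < i of C(j) * C(i - j - 1)
    catalan = [1] * (upper_limit + 1)
    for i in range(2, upper_limit + 1):
        catalan[i] = sum(catalan[j] * catalan[i - j - 1] for j in range(i))
    return catalan

assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]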
14
1
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class UpperCamelCase_ :
    '''simple docstring'''

    UpperCAmelCase__ = MBartConfig
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = '''gelu'''

    def __init__( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Any=20 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : int=1 , UpperCAmelCase__ : Union[str, Any]=0 , ) ->Any:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = seq_length
        A__ = is_training
        A__ = use_labels
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = eos_token_id
        A__ = pad_token_id
        A__ = bos_token_id

    def SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
        '''simple docstring'''
        A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        A__ = tf.concat([input_ids, eos_tensor] , axis=1)

        A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)

        A__ = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ = prepare_mbart_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]:
        '''simple docstring'''
        A__ = TFMBartModel(config=UpperCAmelCase__).get_decoder()
        A__ = inputs_dict['''input_ids''']

        A__ = input_ids[:1, :]
        A__ = inputs_dict['''attention_mask'''][:1, :]
        A__ = inputs_dict['''head_mask''']
        A__ = 1

        # first forward pass
        A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__)

        A__ , A__ = outputs.to_tuple()
        A__ = past_key_values[1]


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> Union[str, Any]:
    """simple docstring"""
    if attention_mask is None:
        A__ = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        A__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    '''simple docstring'''

    UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__ = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False

    def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->int:
        '''simple docstring'''
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def SCREAMING_SNAKE_CASE ( self : str) ->Any:
        '''simple docstring'''
        A__ = TFMBartModelTester(self)
        A__ = ConfigTester(self , config_class=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase__)


@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    UpperCAmelCase__ = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    UpperCAmelCase__ = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    UpperCAmelCase__ = '''facebook/mbart-large-en-ro'''

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Any) ->int:
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]:
        '''simple docstring'''
        A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def SCREAMING_SNAKE_CASE ( self : List[Any] , **UpperCAmelCase__ : List[Any]) ->Union[str, Any]:
        '''simple docstring'''
        A__ = self.translate_src_text(**UpperCAmelCase__)
        self.assertListEqual(self.expected_text , UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str] , **UpperCAmelCase__ : Dict) ->str:
        '''simple docstring'''
        A__ = self.tokenizer(self.src_text , **UpperCAmelCase__ , return_tensors='''tf''')
        A__ = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2)
        A__ = self.tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__)
        return generated_words

    @slow
    def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]:
        '''simple docstring'''
        self._assert_generated_batch_equal_expected()
14
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
    """simple docstring"""
    A__ = args.pruning_method
    A__ = args.threshold

    A__ = args.model_name_or_path.rstrip('''/''' )
    A__ = args.target_model_path

    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
    A__ = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        elif "bias" in name:
            A__ = tensor
            print(f"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ = TopKBinarizer.apply(lowercase_ , lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                A__ = name[:-6]
                A__ = model[f"""{prefix_}mask_scores"""]
                A__ , A__ = -0.1, 1.1
                A__ = torch.sigmoid(lowercase_ )
                A__ = s * (r - l) + l
                A__ = s_bar.clamp(min=0.0 , max=1.0 )
                A__ = tensor * mask
                print(f"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )

    if target_model_path is None:
        A__ = os.path.join(
            os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" )

    if not os.path.isdir(lowercase_ ):
        shutil.copytree(lowercase_ , lowercase_ )
        print(f"""\nCreated folder {target_model_path}""" )

    torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )


if __name__ == "__main__":
    _lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        """--pruning_method""",
        choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
        type=str,
        required=True,
        help=(
            """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
            """ sigmoied_threshold = Soft movement pruning)"""
        ),
    )
    parser.add_argument(
        """--threshold""",
        type=float,
        required=False,
        help=(
            """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
            """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
            """Not needed for `l0`"""
        ),
    )
    parser.add_argument(
        """--model_name_or_path""",
        type=str,
        required=True,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    parser.add_argument(
        """--target_model_path""",
        default=None,
        type=str,
        required=False,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    _lowerCamelCase : int = parser.parse_args()

    main(args)
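A hedged sketch of what the `magnitude`/`topK` branches compute, in plain PyTorch rather than the `emmental` binarizers used above; `keep_ratio` and the tensor shape are illustrative assumptions.

import torch

def magnitude_mask(weights: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # Keep roughly the `keep_ratio` fraction of weights with the largest magnitude.
    k = max(1, int(keep_ratio * weights.numel()))
    threshold = weights.abs().flatten().kthvalue(weights.numel() - k + 1).values
    return (weights.abs() >= threshold).to(weights.dtype)

w = torch.randn(4, 4)
pruned = w * magnitude_mask(w, keep_ratio=0.25)  # roughly 75% of entries zeroed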
14
1
from __future__ import annotations

_lowerCamelCase : Optional[Any] = 1.60_21E-19  # units = C


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , ) -> tuple[str, float]:
    """simple docstring"""
    if (conductivity, electron_conc, mobility).count(0 ) != 1:
        raise ValueError('''You cannot supply more or fewer than 2 values''' )
    elif conductivity < 0:
        raise ValueError('''Conductivity cannot be negative''' )
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative''' )
    elif mobility < 0:
        raise ValueError('''mobility cannot be negative''' )
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
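A numeric example of the third branch (solving for mobility), with illustrative values: for sigma = n * q * mu, sigma in S/m and n in m^-3 give mu in m^2/(V*s).

ELECTRON_CHARGE = 1.6021e-19  # C

conductivity, electron_conc = 5.0, 1e20
mobility = conductivity / (electron_conc * ELECTRON_CHARGE)
print(f"mobility = {mobility:.4f} m^2/(V*s)")  # ~0.3121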
14
_lowerCamelCase : Optional[int] = 65521


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    """simple docstring"""
    A__ = 1
    A__ = 0
    for plain_chr in plain_text:
        A__ = (a + ord(lowercase_ )) % MOD_ADLER
        A__ = (b + a) % MOD_ADLER
    return (b << 16) | a
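A quick check of the checksum above against a standard Adler-32 test vector; the de-obfuscated name `adler32` is illustrative, and the value can be cross-checked with the stdlib's `zlib.adler32`.

import zlib

MOD_ADLER = 65521

def adler32(plain_text: str) -> int:
    # Running sums a (of bytes) and b (of a), both modulo 65521.
    a, b = 1, 0
    for ch in plain_text:
        a = (a + ord(ch)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a

assert adler32("Wikipedia") == 300286872 == zlib.adler32(b"Wikipedia")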
14
1
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = ['''vqvae''']

    def __init__( self : Optional[int] , UpperCAmelCase__ : AutoencoderKL , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : Mel , UpperCAmelCase__ : Union[DDIMScheduler, DDPMScheduler] , ) ->Dict:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , mel=UpperCAmelCase__ , vqvae=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
        '''simple docstring'''
        return 50 if isinstance(self.scheduler , UpperCAmelCase__) else 1_000

    @torch.no_grad()
    def __call__( self : Tuple , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : str = None , UpperCAmelCase__ : np.ndarray = None , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = None , UpperCAmelCase__ : torch.Generator = None , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : torch.Generator = None , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : torch.Tensor = None , UpperCAmelCase__ : torch.Tensor = None , UpperCAmelCase__ : Union[str, Any]=True , ) ->Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        '''simple docstring'''
        A__ = steps or self.get_default_steps()
        self.scheduler.set_timesteps(UpperCAmelCase__)
        A__ = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            A__ = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            A__ = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=UpperCAmelCase__ , device=self.device , )
        A__ = noise
        A__ = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(UpperCAmelCase__ , UpperCAmelCase__)
            A__ = self.mel.audio_slice_to_image(UpperCAmelCase__)
            A__ = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
                (input_image.height, input_image.width))
            A__ = (input_image / 255) * 2 - 1
            A__ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                A__ = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase__ , 0)).latent_dist.sample(
                    generator=UpperCAmelCase__)[0]
                A__ = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                A__ = self.scheduler.add_noise(UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler.timesteps[start_step - 1])

            A__ = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            A__ = int(mask_start_secs * pixels_per_second)
            A__ = int(mask_end_secs * pixels_per_second)
            A__ = self.scheduler.add_noise(UpperCAmelCase__ , UpperCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet , UpperCAmelCase__):
                A__ = self.unet(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)['''sample''']
            else:
                A__ = self.unet(UpperCAmelCase__ , UpperCAmelCase__)['''sample''']

            if isinstance(self.scheduler , UpperCAmelCase__):
                A__ = self.scheduler.step(
                    model_output=UpperCAmelCase__ , timestep=UpperCAmelCase__ , sample=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , )['''prev_sample''']
            else:
                A__ = self.scheduler.step(
                    model_output=UpperCAmelCase__ , timestep=UpperCAmelCase__ , sample=UpperCAmelCase__ , generator=UpperCAmelCase__ , )['''prev_sample''']

            if mask is not None:
                if mask_start > 0:
                    A__ = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    A__ = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            A__ = 1 / self.vqvae.config.scaling_factor * images
            A__ = self.vqvae.decode(UpperCAmelCase__)['''sample''']

        A__ = (images / 2 + 0.5).clamp(0 , 1)
        A__ = images.cpu().permute(0 , 2 , 3 , 1).numpy()
        A__ = (images * 255).round().astype('''uint8''')
        A__ = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(UpperCAmelCase__ , mode='''RGB''').convert('''L''') for _ in images))

        A__ = [self.mel.image_to_audio(UpperCAmelCase__) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase__)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase__))

    @torch.no_grad()
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[Image.Image] , UpperCAmelCase__ : int = 50) ->np.ndarray:
        '''simple docstring'''
        assert isinstance(self.scheduler , UpperCAmelCase__)
        self.scheduler.set_timesteps(UpperCAmelCase__)
        A__ = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
        A__ = (sample / 255) * 2 - 1
        A__ = torch.Tensor(UpperCAmelCase__).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
            A__ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            A__ = self.scheduler.alphas_cumprod[t]
            A__ = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            A__ = 1 - alpha_prod_t
            A__ = self.unet(UpperCAmelCase__ , UpperCAmelCase__)['''sample''']
            A__ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            A__ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            A__ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : torch.Tensor , UpperCAmelCase__ : torch.Tensor , UpperCAmelCase__ : float) ->torch.Tensor:
        '''simple docstring'''
        A__ = acos(torch.dot(torch.flatten(UpperCAmelCase__) , torch.flatten(UpperCAmelCase__)) / torch.norm(UpperCAmelCase__) / torch.norm(UpperCAmelCase__))
        return sin((1 - alpha) * theta) * xa / sin(UpperCAmelCase__) + sin(alpha * theta) * xa / sin(UpperCAmelCase__)
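The staticmethod at the end of the pipeline is spherical linear interpolation (slerp) between two noise tensors. A minimal NumPy sketch of the same formula; the function name and test vectors are illustrative.

import numpy as np

def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    # theta is the angle between the two flattened tensors.
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)

x0, x1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
np.testing.assert_allclose(slerp(x0, x1, 0.5), [0.5**0.5, 0.5**0.5])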
14
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

_lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

_lowerCamelCase : Union[str, Any] = {
    """vocab_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
_lowerCamelCase : str = {
    """vocab_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
_lowerCamelCase : str = {
    """vocab_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

_lowerCamelCase : Any = {
    """facebook/dpr-ctx_encoder-single-nq-base""": 512,
    """facebook/dpr-ctx_encoder-multiset-base""": 512,
}
_lowerCamelCase : List[str] = {
    """facebook/dpr-question_encoder-single-nq-base""": 512,
    """facebook/dpr-question_encoder-multiset-base""": 512,
}
_lowerCamelCase : Tuple = {
    """facebook/dpr-reader-single-nq-base""": 512,
    """facebook/dpr-reader-multiset-base""": 512,
}

_lowerCamelCase : Optional[Any] = {
    """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[int] = {
    """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[Any] = {
    """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = DPRContextEncoderTokenizer


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = DPRQuestionEncoderTokenizer


_lowerCamelCase : int = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)

_lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])


_lowerCamelCase : Dict = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

        [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ :
    '''simple docstring'''

    def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
        elif titles is None or texts is None:
            A__ = titles if texts is None else texts
            return super().__call__(
                UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
        A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles]
        A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts]
        A__ = len(UpperCAmelCase__)
        A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages
        assert len(UpperCAmelCase__) == len(
            UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts."""
        A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
        A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
        A__ = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__)
            ]
        }
        if return_attention_mask is not False:
            A__ = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            A__ = attention_mask
        return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]:
        '''simple docstring'''
        A__ = reader_input['''input_ids''']
        A__ , A__ , A__ = reader_output[:3]
        A__ = len(UpperCAmelCase__)
        A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__)
        A__ = []
        for doc_id in sorted_docs:
            A__ = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            A__ = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                A__ = sequence_ids.index(self.pad_token_id)
            else:
                A__ = len(UpperCAmelCase__)
            A__ = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(UpperCAmelCase__) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]:
        '''simple docstring'''
        A__ = []
        for start_index, start_score in enumerate(UpperCAmelCase__):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__)
        A__ = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            A__ = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(UpperCAmelCase__) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
    UpperCAmelCase__ = DPRReaderTokenizer
14
1
import argparse
from collections import defaultdict

import yaml


_lowerCamelCase : List[str] = """docs/source/en/_toctree.yml"""


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
    """simple docstring"""
    A__ = defaultdict(lowercase_ )
    A__ = []
    A__ = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
        else:
            new_doc_list.append(lowercase_ )

    A__ = new_doc_list
    A__ = [key for key, value in counts.items() if value > 1]

    A__ = []
    for duplicate_key in duplicates:
        A__ = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
        if len(lowercase_ ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    A__ = sorted(lowercase_ , key=lambda lowercase_ : s["title"].lower() )

    # "overview" gets special treatment and is always first
    if len(lowercase_ ) > 1:
        raise ValueError(f'''{doc_list} has two \'overview\' docs which is not allowed.''' )

    overview_doc.extend(lowercase_ )

    # Sort
    return overview_doc


def SCREAMING_SNAKE_CASE ( lowercase_=False ) -> Tuple:
    """simple docstring"""
    with open(lowercase_ , encoding='''utf-8''' ) as f:
        A__ = yaml.safe_load(f.read() )

    # Get to the API doc
    A__ = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    A__ = content[api_idx]['''sections''']

    # Then to the model doc
    A__ = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    A__ = api_doc[scheduler_idx]['''sections''']
    A__ = clean_doc_toc(lowercase_ )

    A__ = False
    if new_scheduler_doc != scheduler_doc:
        A__ = True
        if overwrite:
            A__ = new_scheduler_doc

    if diff:
        if overwrite:
            A__ = api_doc
            with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )


def SCREAMING_SNAKE_CASE ( lowercase_=False ) -> Tuple:
    """simple docstring"""
    with open(lowercase_ , encoding='''utf-8''' ) as f:
        A__ = yaml.safe_load(f.read() )

    # Get to the API doc
    A__ = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    A__ = content[api_idx]['''sections''']

    # Then to the model doc
    A__ = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    A__ = False
    A__ = api_doc[pipeline_idx]['''sections''']
    A__ = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            A__ = pipeline_doc['''section''']
            A__ = clean_doc_toc(lowercase_ )
            if overwrite:
                A__ = new_sub_pipeline_doc
        new_pipeline_docs.append(lowercase_ )

    # sort overall pipeline doc
    A__ = clean_doc_toc(lowercase_ )

    if new_pipeline_docs != pipeline_docs:
        A__ = True
        if overwrite:
            A__ = new_pipeline_docs

    if diff:
        if overwrite:
            A__ = api_doc
            with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )


if __name__ == "__main__":
    _lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    _lowerCamelCase : str = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
14
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCamelCase : Any = logging.get_logger(__name__)


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    UpperCAmelCase__ = '''encoder-decoder'''
    UpperCAmelCase__ = True

    def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
        '''simple docstring'''
        super().__init__(**UpperCAmelCase__)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        A__ = kwargs.pop('''encoder''')
        A__ = encoder_config.pop('''model_type''')
        A__ = kwargs.pop('''decoder''')
        A__ = decoder_config.pop('''model_type''')

        from ..auto.configuration_auto import AutoConfig

        A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
        A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
        A__ = True

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        A__ = True
        A__ = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
        '''simple docstring'''
        A__ = copy.deepcopy(self.__dict__)
        A__ = self.encoder.to_dict()
        A__ = self.decoder.to_dict()
        A__ = self.__class__.model_type
        return output
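This is the composite-config pattern behind `transformers`' `EncoderDecoderConfig`; a minimal usage sketch, assuming `transformers` is installed.

from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()  # flipped to is_decoder=True / add_cross_attention=True by the classmethod
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention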
14
1
from collections.abc import Iterable
from typing import Any


class UpperCamelCase_ :
    '''simple docstring'''

    def __init__( self : Union[str, Any] , UpperCAmelCase__ : int | None = None) ->Union[str, Any]:
        '''simple docstring'''
        A__ = value
        A__ = None  # Added in order to delete a node easier
        A__ = None
        A__ = None

    def __repr__( self : Optional[Any]) ->str:
        '''simple docstring'''
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1)


class UpperCamelCase_ :
    '''simple docstring'''

    def __init__( self : Tuple , UpperCAmelCase__ : Node | None = None) ->Optional[Any]:
        '''simple docstring'''
        A__ = root

    def __str__( self : List[str]) ->str:
        '''simple docstring'''
        return str(self.root)

    def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node | None) ->None:
        '''simple docstring'''
        if new_children is not None:  # reset its kids
            A__ = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(UpperCAmelCase__):  # If it is the right children
                A__ = new_children
            else:
                A__ = new_children
        else:
            A__ = new_children

    def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Node) ->bool:
        '''simple docstring'''
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->bool:
        '''simple docstring'''
        return self.root is None

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->None:
        '''simple docstring'''
        A__ = Node(UpperCAmelCase__)  # create a new Node
        if self.empty():  # if Tree is empty
            A__ = new_node  # set its root
        else:  # Tree is not empty
            A__ = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        A__ = new_node  # We insert the new node in a leaf
                        break
                    else:
                        A__ = parent_node.left
                else:
                    if parent_node.right is None:
                        A__ = new_node
                        break
                    else:
                        A__ = parent_node.right
            A__ = parent_node

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , *UpperCAmelCase__ : Optional[Any]) ->None:
        '''simple docstring'''
        for value in values:
            self.__insert(UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[str]) ->Node | None:
        '''simple docstring'''
        if self.empty():
            raise IndexError('''Warning: Tree is empty! Please use another.''')
        else:
            A__ = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                A__ = node.left if value < node.value else node.right
            return node

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Node | None = None) ->Node | None:
        '''simple docstring'''
        if node is None:
            if self.root is None:
                return None
            A__ = self.root

        if not self.empty():
            while node.right is not None:
                A__ = node.right
        return node

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Node | None = None) ->Node | None:
        '''simple docstring'''
        if node is None:
            A__ = self.root
            if self.root is None:
                return None

        if not self.empty():
            A__ = self.root
            while node.left is not None:
                A__ = node.left
        return node

    def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : int) ->None:
        '''simple docstring'''
        A__ = self.search(UpperCAmelCase__)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(UpperCAmelCase__ , UpperCAmelCase__)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(UpperCAmelCase__ , node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(UpperCAmelCase__ , node.left)
            else:
                A__ = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                A__ = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Node | None) ->Iterable:
        '''simple docstring'''
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any=None) ->Any:
        '''simple docstring'''
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : list , UpperCAmelCase__ : Node | None) ->None:
        '''simple docstring'''
        if node:
            self.inorder(UpperCAmelCase__ , node.left)
            arr.append(node.value)
            self.inorder(UpperCAmelCase__ , node.right)

    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Node) ->int:
        '''simple docstring'''
        A__ = []
        self.inorder(UpperCAmelCase__ , UpperCAmelCase__)  # append all values to list using inorder traversal
        return arr[k - 1]


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[Node]:
    """simple docstring"""
    A__ = []
    if curr_node is not None:
        A__ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list


def SCREAMING_SNAKE_CASE ( ) -> None:
    """simple docstring"""
    A__ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    A__ = BinarySearchTree()
    for i in testlist:
        t.insert(lowercase_ )

    # Prints all the elements of the list in order traversal
    print(lowercase_ )

    if t.search(6 ) is not None:
        print('''The value 6 exists''' )
    else:
        print('''The value 6 doesn\'t exist''' )

    if t.search(-1 ) is not None:
        print('''The value -1 exists''' )
    else:
        print('''The value -1 doesn\'t exist''' )

    if not t.empty():
        print('''Max Value: ''' , t.get_max().value )  # type: ignore
        print('''Min Value: ''' , t.get_min().value )  # type: ignore

    for i in testlist:
        t.remove(lowercase_ )
        print(lowercase_ )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
14
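The sample above is stored with obfuscated identifiers, so here is a minimal de-obfuscated sketch of the same insert/search logic; the readable names (Node, BST, insert, search) are my own renamings, not part of the dataset.

class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: "Node | None" = None
        self.right: "Node | None" = None


class BST:
    def __init__(self) -> None:
        self.root: Node | None = None

    def insert(self, value: int) -> None:
        # Walk down from the root, going left for smaller values, right otherwise.
        if self.root is None:
            self.root = Node(value)
            return
        node = self.root
        while True:
            if value < node.value:
                if node.left is None:
                    node.left = Node(value)
                    return
                node = node.left
            else:
                if node.right is None:
                    node.right = Node(value)
                    return
                node = node.right

    def search(self, value: int) -> "Node | None":
        # Same lazy descent as the obfuscated version: stop at None or a match.
        node = self.root
        while node is not None and node.value != value:
            node = node.left if value < node.value else node.right
        return node


tree = BST()
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    tree.insert(v)
assert tree.search(6) is not None
assert tree.search(-1) is None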
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
    """simple docstring"""
    A__ = [0] * len(lowercase_ )
    A__ = []
    A__ = [1] * len(lowercase_ )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(lowercase_ ) ):
        if indegree[i] == 0:
            queue.append(lowercase_ )

    while queue:
        A__ = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                A__ = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(lowercase_ )

    print(max(lowercase_ ) )


# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
14
1
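A readable sketch of the Kahn-style longest-path computation in the sample above; the name longest_distance and the return value are my choices (the obfuscated original prints the result instead).

from collections import deque


def longest_distance(graph: dict[int, list[int]]) -> int:
    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)  # a vertex alone is a path of length 1
    for neighbours in graph.values():
        for v in neighbours:
            indegree[v] += 1
    # Start from all sources and relax edges in topological order.
    queue = deque(v for v in graph if indegree[v] == 0)
    while queue:
        vertex = queue.popleft()
        for x in graph[vertex]:
            indegree[x] -= 1
            long_dist[x] = max(long_dist[x], long_dist[vertex] + 1)
            if indegree[x] == 0:
                queue.append(x)
    return max(long_dist)


graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
assert longest_distance(graph) == 5  # e.g. 0 -> 2 -> 5 -> 6 -> 7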
from __future__ import annotations

from fractions import Fraction


def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
    """simple docstring"""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[str]:
    """simple docstring"""
    A__ = []
    A__ = 11
    A__ = int('''1''' + '''0''' * digit_len )
    for num in range(lowercase_ , lowercase_ ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(lowercase_ , lowercase_ ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        A__ = 10
    return solutions


def SCREAMING_SNAKE_CASE ( lowercase_ = 2 ) -> int:
    """simple docstring"""
    A__ = 1.0
    for fraction in fraction_list(lowercase_ ):
        A__ = Fraction(lowercase_ )
        result *= frac.denominator / frac.numerator
    return int(lowercase_ )


if __name__ == "__main__":
    print(solution())
14
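A quick sanity check of the digit-cancelling predicate in the sample above, using the four classic two-digit cases; the helper name is mine, but the arithmetic mirrors the obfuscated code.

def is_digit_cancelling(num: int, den: int) -> bool:
    # num/den stays equal after "cancelling" the shared middle digit.
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den


# The four non-trivial two-digit solutions; their product is 1/100, so the
# sample's solution() should return 100 for the default digit length of 2.
for num, den in ((16, 64), (19, 95), (26, 65), (49, 98)):
    assert is_digit_cancelling(num, den)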
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


_lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)


@dataclass
class UpperCamelCase_ ( datasets.BuilderConfig ):
    '''simple docstring'''

    UpperCAmelCase__ = None
    UpperCAmelCase__ = "utf-8"
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    UpperCAmelCase__ = True  # deprecated
    UpperCAmelCase__ = None  # deprecated
    UpperCAmelCase__ = 10 << 20  # 10MB
    UpperCAmelCase__ = None


class UpperCamelCase_ ( datasets.ArrowBasedBuilder ):
    '''simple docstring'''

    UpperCAmelCase__ = JsonConfig

    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
        '''simple docstring'''
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
            A__ = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
        return datasets.DatasetInfo(features=self.config.features)

    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict:
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        A__ = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(UpperCAmelCase__ , (str, list, tuple)):
            A__ = data_files
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
                A__ = [files]
            A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
        A__ = []
        for split_name, files in data_files.items():
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
                A__ = [files]
            A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
            splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files}))
        return splits

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type
                A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema)
        return pa_table

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str:
        '''simple docstring'''
        for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                    A__ = json.load(UpperCAmelCase__)
                # We keep only the field we are interested in
                A__ = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(UpperCAmelCase__ , (list, tuple)):
                    A__ = set().union(*[row.keys() for row in dataset])
                    A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
                else:
                    A__ = dataset
                A__ = pa.Table.from_pydict(UpperCAmelCase__)
                yield file_idx, self._cast_table(UpperCAmelCase__)
            # If the file has one json object per line
            else:
                with open(UpperCAmelCase__ , '''rb''') as f:
                    A__ = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ = max(self.config.chunksize // 32 , 16 << 10)
                    A__ = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        A__ = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(UpperCAmelCase__)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''')
                        try:
                            while True:
                                try:
                                    A__ = paj.read_json(
                                        io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(UpperCAmelCase__ , pa.ArrowInvalid)
                                        and "straddling" not in str(UpperCAmelCase__)
                                        or block_size > len(UpperCAmelCase__)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                                    A__ = json.load(UpperCAmelCase__)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(UpperCAmelCase__ , UpperCAmelCase__):  # list is the only sequence type supported in JSON
                                try:
                                    A__ = set().union(*[row.keys() for row in dataset])
                                    A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
                                    A__ = pa.Table.from_pydict(UpperCAmelCase__)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(UpperCAmelCase__)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__)
                        batch_idx += 1
14
1
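A minimal sketch of the core parsing trick in the builder above: feed pyarrow.json a bytes buffer of newline-delimited JSON and double block_size whenever a record straddles a block boundary. The sample data and the starting block size are made up for illustration.

import io

import pyarrow as pa
import pyarrow.json as paj

batch = b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
block_size = 16  # deliberately tiny to exercise the retry path
while True:
    try:
        table = paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
        break
    except (pa.ArrowInvalid, pa.ArrowNotImplementedError):
        # A record did not fit inside one block; retry with a larger block.
        block_size *= 2
assert table.num_rows == 2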
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    '''simple docstring'''

    @register_to_config
    def __init__( self : int , *, UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 768 , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) ->Any:
        '''simple docstring'''
        super().__init__()

        A__ = nn.Parameter(torch.zeros(UpperCAmelCase__))

        # parameters for additional clip time embeddings
        A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
        A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)

        # parameters for encoder hidden states
        A__ = clip_extra_context_tokens
        A__ = nn.Linear(
            UpperCAmelCase__ , self.clip_extra_context_tokens * cross_attention_dim)
        A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
        A__ = nn.LayerNorm(UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : int , *, UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any]) ->Any:
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            A__ = image_embeddings.shape[0]
            A__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            A__ = classifier_free_guidance_embeddings.expand(
                UpperCAmelCase__ , -1)
            A__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        A__ = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        A__ = self.embedding_proj(UpperCAmelCase__)
        A__ = self.clip_image_embeddings_project_to_time_embeddings(UpperCAmelCase__)
        A__ = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        A__ = self.clip_extra_context_tokens_proj(UpperCAmelCase__)
        A__ = clip_extra_context_tokens.reshape(UpperCAmelCase__ , -1 , self.clip_extra_context_tokens)
        A__ = clip_extra_context_tokens.permute(0 , 2 , 1)

        A__ = self.encoder_hidden_states_proj(UpperCAmelCase__)
        A__ = self.text_encoder_hidden_states_norm(UpperCAmelCase__)
        A__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
14
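A shape-only sketch of how the block above turns a projected image embedding into n extra text-encoder tokens via the reshape/permute pair; the dimensions here are arbitrary stand-ins, not the model's configured values.

import torch

batch, image_embed_dim, cross_attention_dim, n_tokens = 2, 768, 1280, 4
proj = torch.nn.Linear(image_embed_dim, n_tokens * cross_attention_dim)

image_embeddings = torch.randn(batch, image_embed_dim)
tokens = proj(image_embeddings)               # (batch, n_tokens * cross_attention_dim)
tokens = tokens.reshape(batch, -1, n_tokens)  # (batch, cross_attention_dim, n_tokens)
tokens = tokens.permute(0, 2, 1)              # (batch, n_tokens, cross_attention_dim)
assert tokens.shape == (batch, n_tokens, cross_attention_dim)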
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""


@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
        '''simple docstring'''
        return AutoConfig.from_pretrained(UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.num_hidden_layers , 1)

    def SCREAMING_SNAKE_CASE ( self : int) ->Any:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)

    def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
        '''simple docstring'''
        A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , 1)

    def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
        '''simple docstring'''
        with self.assertRaises(UpperCAmelCase__):
            create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
14
1
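A hedged sketch of the idea the tests above exercise: building a shallower student by copying a subset of the teacher's layers. The helper, the even-spacing rule, and the toy layer stack are illustrations of mine, not the actual make_student implementation.

import torch.nn as nn


def pick_alternating_layers(teacher_layers: nn.ModuleList, n_student: int) -> nn.ModuleList:
    n_teacher = len(teacher_layers)
    if n_student > n_teacher:
        raise ValueError("student cannot have more layers than the teacher")
    # Spread the copied layers evenly across the teacher's depth.
    step = n_teacher / n_student
    indices = [int(i * step) for i in range(n_student)]
    return nn.ModuleList(teacher_layers[i] for i in indices)


teacher = nn.ModuleList(nn.Linear(8, 8) for _ in range(12))
student = pick_alternating_layers(teacher, 3)
assert len(student) == 3  # copies of teacher layers 0, 4 and 8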