from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted string of the word's characters (its anagram signature)."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that is an anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
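
# A minimal usage sketch (the sample words are hypothetical; the real script reads
# words.txt): words sharing a signature are anagrams of one another, e.g.
#
#     signature("listen") == signature("silent")  # -> True, both are "eilnst"
#     anagram("listen")  # every word in words.txt whose signature is "eilnst"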
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length, or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, it should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
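
# To run this script locally (assuming it is saved as tracking.py), a typical
# invocation would be:
#
#     accelerate launch tracking.py --with_tracking --project_dir logs
#
# Trackers such as TensorBoard or Weights & Biases are discovered automatically
# because the script passes log_with="all"; see the accelerate examples readme
# linked above for the other launch modes.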
"""Non-preemptive shortest-job-first scheduling: compute per-process waiting and turnaround times."""

from __future__ import annotations

from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # While processes are not completed:
    # a process whose arrival time has passed and that has remaining execution time
    # is put into ready_process. The shortest process in ready_process,
    # target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i

            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is the sum of burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""Tokenization classes for MGP-STR."""

import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR: splits text into characters and maps them with a vocab file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
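
# A minimal usage sketch (the vocab path and input string are hypothetical);
# tokenization is purely character level:
#
#     tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#     tokenizer._tokenize("hello")  # -> ["h", "e", "l", "l", "o"]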
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
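
# Typical CLI usage once wired into the `accelerate` entry point (subcommand names
# come from the parsers registered above):
#
#     accelerate config            # interactive configuration questionnaire
#     accelerate config default    # write a default config file
#     accelerate config update     # migrate an existing config file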
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : List[str] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : List[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
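
# Example invocation, run from the repo root (the model path is hypothetical):
#
#     python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict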
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
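
# Worked example: 4150 is one of the numbers counted by solution(), since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.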
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
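
# A small usage sketch (the overridden layer count is illustrative), assuming
# transformers is installed:
#
#     config = CamembertConfig(num_hidden_layers=6)
#     onnx_config = CamembertOnnxConfig(config)
#     list(onnx_config.inputs)  # -> ["input_ids", "attention_mask"]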
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR is a character-level model: every character is its own token.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
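# Added round-trip sketch (the tiny vocabulary below is fabricated purely for
# illustration; real checkpoints ship their own vocab.json):
#
# import json, tempfile
#
# with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
#     json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}, f)
#
# tokenizer = MgpstrTokenizer(vocab_file=f.name)
# tokenizer._tokenize("abc")                          # ['a', 'b', 'c']
# [tokenizer._convert_token_to_id(t) for t in "abc"]  # [2, 3, 4]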
| 27 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
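# Added cross-check: the row-wise Pascal's-triangle DP above agrees with the
# closed form in the standard library.
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252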
| 19 | 0 |
from typing import List, Union

import numpy as np

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline


logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
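# Typical end-user entry point for the pipeline above (a sketch; the call
# downloads a real NLI checkpoint on first use, so it is left commented):
#
# from transformers import pipeline
#
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier(
#     "one day I will see the world",
#     candidate_labels=["travel", "cooking", "dancing"],
# )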
| 28 |
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
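    # Added worked example: ordering astronomically large powers costs two log
    # calls instead of big-int exponentiation; 3**631 just edges out 2**1000,
    # since res(2, 1000) ~ 301.03 while res(3, 631) ~ 301.06.
    assert res(3, 631) > res(2, 1000)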
| 19 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 29 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
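# Added construction sketch for the dataset above (hedged: paths and the
# tokenizer name are placeholders, and data_dir must contain the SQuAD jsons):
#
# from transformers import AutoTokenizer
#
# args = SquadDataTrainingArguments(model_type="bert", data_dir="/path/to/squad")
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train_dataset = SquadDataset(args, tokenizer, mode="train", cache_dir="/tmp")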
| 19 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 30 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
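# Hypothetical illustration (not an actual transformers command): a concrete
# command subclasses the ABC above, wires an argparse subparser in
# register_subcommand, and performs its work in run. `parser` is assumed to be
# the subparsers action the CLI passes in, as is conventional for this ABC.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="print a greeting")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from a CLI subcommand")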
| 19 | 0 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
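# Added worked example of the mass-action law encoded above (n * p = n_i**2):
# with illustrative silicon-like numbers n = 1e16 and n_i = 1.5e10, the hole
# concentration follows as n_i**2 / n = 22500.
kind, value = carrier_concentration(electron_conc=1e16, hole_conc=0, intrinsic_conc=1.5e10)
assert kind == "hole_conc" and abs(value - 22500.0) < 1e-6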
| 31 |
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use the text prompt to segment the region that should be inpainted
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 19 | 0 |
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
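    # Added cross-check against the standard library: solution(4) is the
    # central entry C(8, 4) of row 8 of Pascal's triangle.
    from math import comb

    assert solution(4) == comb(8, 4) == 70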
| 32 |
from collections import deque


def tarjan(g):
    # Tarjan's algorithm for strongly connected components; g is an adjacency list.
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
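    # Added illustration: components come out in reverse topological order of
    # the condensation DAG; a two-node cycle with a tail shows the ordering.
    assert tarjan(create_graph(3, [(0, 1), (1, 0), (1, 2)])) == [[2], [1, 0]]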
| 19 | 0 |
"""simple docstring"""
def lowercase ( __snake_case : Optional[int] ):
lowercase_ : int = 0
lowercase_ : Optional[Any] = len(__snake_case )
for i in range(n - 1 ):
for j in range(i + 1 , __snake_case ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowercase ( __snake_case : str ):
if len(__snake_case ) <= 1:
return arr, 0
lowercase_ : Optional[Any] = len(__snake_case ) // 2
lowercase_ : List[Any] = arr[0:mid]
lowercase_ : Union[str, Any] = arr[mid:]
lowercase_ , lowercase_ : Tuple = count_inversions_recursive(__snake_case )
lowercase_ , lowercase_ : List[Any] = count_inversions_recursive(__snake_case )
lowercase_ , lowercase_ : List[Any] = _count_cross_inversions(__snake_case , __snake_case )
lowercase_ : List[Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def lowercase ( __snake_case : str , __snake_case : Optional[int] ):
lowercase_ : Optional[Any] = []
lowercase_ : Any = 0
while i < len(__snake_case ) and j < len(__snake_case ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__snake_case ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__snake_case ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def lowercase ( ):
lowercase_ : Union[str, Any] = [1_0, 2, 1, 5, 5, 2, 1_1]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
lowercase_ : int = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __snake_case )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
lowercase_ : Dict = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __snake_case )
# an empty list should also have zero inversions
lowercase_ : List[Any] = []
lowercase_ : Any = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : List[str] = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __snake_case )
if __name__ == "__main__":
main()
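# Added cross-check on a fresh input: both counters agree, but the brute force
# runs in O(n^2) while the merge-sort recursion runs in O(n log n).
assert count_inversions_bf([3, 2, 1]) == count_inversions_recursive([3, 2, 1])[1] == 3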
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
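# Added illustrative sketch (hypothetical, not the actual _LazyModule
# implementation): the idea is to defer each submodule import until one of its
# symbols is first accessed.
#
# import importlib
# import types
#
# class _TinyLazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure
#
#     def __getattr__(self, attr):
#         for module_name, symbols in self._import_structure.items():
#             if attr in symbols:
#                 module = importlib.import_module(f"{self.__name__}.{module_name}")
#                 return getattr(module, attr)
#         raise AttributeError(attr)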
| 19 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 34 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 19 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
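# Added example of the rope_scaling contract enforced by the validator above —
# exactly two fields, a "type" in {"linear", "dynamic"} and a float factor > 1
# (a sketch; instantiating the config does not load any weights):
#
# config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})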
| 35 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
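# Added usage note: with the public surface assembled above, the typical entry
# point is load_dataset (a sketch; the call needs network access on first use):
#
# ds = load_dataset("glue", "mrpc")
# print(ds["train"][0])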
| 19 | 0 |
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
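# Added in-memory training sketch for the wrapper above (the corpus and
# vocab_size are toy values and may need tuning for a given corpus; shown
# under a main guard so the module stays import-safe):
if __name__ == "__main__":
    corpus = ["hello world", "hello unigram tokenizer", "unigram models are neat"]
    toy_tokenizer = SentencePieceUnigramTokenizer()
    toy_tokenizer.train_from_iterator(corpus, vocab_size=40, show_progress=False)
    print(toy_tokenizer.encode("hello world").tokens)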
| 36 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 19 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''nielsr/canine-s''': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
_lowerCAmelCase = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_lowerCAmelCase = 0
_lowerCAmelCase = 0xe_000
_lowerCAmelCase = 0xe_001
_lowerCAmelCase = 0xe_002
_lowerCAmelCase = 0xe_003
_lowerCAmelCase = 0xe_004
# Maps special codepoints to human-readable names.
_lowerCAmelCase = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_lowerCAmelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """Construct a CANINE tokenizer (i.e. a character splitter)."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to write out.
        return ()
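
if __name__ == "__main__":
    # Usage sketch (illustrative addition, not part of the original module). Token
    # IDs are plain Unicode codepoints; the special tokens live in the Private Use
    # Area (CLS = 0xE000, SEP = 0xE001, ...). Depending on the transformers version,
    # direct construction may not work, in which case
    # CanineTokenizer.from_pretrained("google/canine-s") is the robust path.
    tokenizer = CanineTokenizer()
    tokens = tokenizer._tokenize("hi")  # ['h', 'i']
    ids = [tokenizer._convert_token_to_id(t) for t in tokens]  # [104, 105]
    print(tokenizer.build_inputs_with_special_tokens(ids))  # [57344, 104, 105, 57345]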
| 37 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
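
# Standalone sketch (illustrative, not part of the original tests): the slow
# integration checks above reduce to this pattern -- run a small public ESM-2
# checkpoint on fixed token IDs and compare a logits slice within a tolerance.
#
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#     assert logits.shape == (1, 6, 33)  # 33 = ESM-2 token vocabulary size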
| 19 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
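
# Pipeline sketch (illustrative, mirroring the integration tests above):
# CycleDiffusion needs a DDIM scheduler plus a `source_prompt` describing the
# input image, so it can invert the image under the source prompt and re-denoise
# it under the target prompt.
#
#     scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#     pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
#     out = pipe(prompt="A black colored car", source_prompt="A blue colored car",
#                image=init_image, strength=0.85, guidance_scale=3, source_guidance_scale=1)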
| 38 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
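
# Worked example (assumed numbers): on a node with 8 GPUs and `partitions: 2` in
# SM_HP_MP_PARAMETERS, smp.dp_size() is 4, so `world_size` above reports the 4
# data-parallel replicas rather than the 8 physical devices.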
| 19 | 0 |
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Tarjan's algorithm for finding the strongly connected components (SCCs) of a
    directed graph, given as an adjacency list. Runs in O(V + E).
    """

    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            # v is the root of an SCC: pop the whole component off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for ``n`` vertices from a list of (u, v) edges."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
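
    # Components are emitted when their root finishes, i.e. in reverse topological
    # order of the condensation. A second (illustrative) check: a 2-cycle {0, 1}
    # plus an isolated vertex 2.
    assert tarjan(create_graph(3, [(0, 1), (1, 0)])) == [[1, 0], [2]]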
| 39 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value

    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    # Hoare-style partition around a pivot *value*.
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
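
# Worked example (illustrative): for a 1000-element list the depth budget above is
# 2 * ceil(log2(1000)) = 20. A run of unlucky pivots exhausts it and the slice falls
# back to heap_sort; slices of at most `size_threshold` (16) elements are finished
# off with insertion_sort, which is fast on nearly-sorted data.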
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 19 | 0 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def lowercase ( A_ , A_=15 , A_=128 , A_=100 , A_="igf_model.pt" , )-> Any:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
a : int = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
a : int = SecondaryLearner(A_ )
# Train secondary learner
a : Optional[Any] = train_secondary_learner(
A_ , A_ , max_epochs=A_ , batch_size=A_ , eval_freq=100 , igf_model_path=A_ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
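
# Example invocation (illustrative; the script name and paths are assumptions):
#
#     python run_clm_igf.py --data_dir=data --model_name_or_path=gpt2 \
#         --data_file=data/tokenized_stories_train_wikitext103.jbl \
#         --igf_data_file=igf_context_pairs.jbl --output_dir=out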
| 40 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ) -> Union[str, Any]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase , "predict" )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
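
# Usage sketch (illustrative; helper names like `post_processing_function` are
# assumptions matching the class above):
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model, args=training_args,
#         train_dataset=train_ds, eval_dataset=eval_ds, eval_examples=eval_examples,
#         post_process_function=post_processing_function, compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=30, num_beams=4)  # forwarded as gen_kwargs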
| 19 | 0 |
""" Testing suite for the PyTorch MaskFormer model. """
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))
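
# Worked shape example (illustrative, using the tester defaults above): with
# batch_size=2, num_queries=10, min_size=128, max_size=192, the head predicts
# masks_queries_logits of shape (2, 10, 32, 48) -- the //4 spatial compression --
# and class_queries_logits of shape (2, 10, 5), where 5 = num_labels + 1 null class.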
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
lowerCamelCase__ : Any = model(**UpperCamelCase__ )
# masks_queries_logits
lowerCamelCase__ : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase__ : Optional[Any] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
lowerCamelCase__ : List[Any] = torch.tensor(UpperCamelCase__ ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
# class_queries_logits
lowerCamelCase__ : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase__ : Optional[int] = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCamelCase__ )
.eval()
)
lowerCamelCase__ : Any = self.default_image_processor
lowerCamelCase__ : Optional[Any] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowerCamelCase__ : Optional[int] = inputs["""pixel_values"""].to(UpperCamelCase__ )
lowerCamelCase__ : int = [el.to(UpperCamelCase__ ) for el in inputs["""mask_labels"""]]
lowerCamelCase__ : int = [el.to(UpperCamelCase__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 41 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
__A ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if latents is None:
lowerCamelCase_ = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase_ = latents.to(lowercase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = self.image_processor(lowercase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ = image.to(dtype=self.image_encoder.dtype , device=lowercase )
lowerCamelCase_ = self.image_encoder(lowercase )["last_hidden_state"]
lowerCamelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
if isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = 1
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ = len(lowercase )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}' )
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = batch_size * num_images_per_prompt
lowerCamelCase_ = guidance_scale > 1.0
lowerCamelCase_ = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.prior.config.num_embeddings
lowerCamelCase_ = self.prior.config.embedding_dim
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
lowerCamelCase_ = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
lowerCamelCase_ = []
for i, latent in enumerate(lowercase ):
lowerCamelCase_ = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowercase )
lowerCamelCase_ = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
lowerCamelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
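# The classifier-free guidance update used in the denoising loop above, shown
# on plain tensors; a minimal sketch of the arithmetic only, not the pipeline API.
import torch

guidance_scale = 3.0
noise_pred_uncond = torch.zeros(1, 4)
noise_pred_cond = torch.ones(1, 4)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
assert torch.allclose(guided, torch.full((1, 4), 3.0))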
| 19 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowercase : Dict = ["gpt2"]
lowercase : Any = "gpt2"
if is_tf_available():
class __UpperCAmelCase ( tf.Module ):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__()
_snake_case = tokenizer
_snake_case = AutoConfig.from_pretrained(lowerCAmelCase_ )
_snake_case = TFGPTaLMHeadModel.from_config(lowerCAmelCase_ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.tokenizer(lowerCAmelCase_ )
_snake_case = tokenized['input_ids'].to_tensor()
_snake_case = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case = self.model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )['logits']
return outputs
@require_tf
@require_keras_nlp
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
_snake_case = [GPTaTokenizer.from_pretrained(lowerCAmelCase_ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case = [TFGPTaTokenizer.from_pretrained(lowerCAmelCase_ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCamelCase ( self ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case = python_outputs[key].numpy()
_snake_case = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowerCAmelCase_ , tf.intaa ) == tf_outputs_values ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_snake_case = tf.function(lowerCAmelCase_ )
for test_inputs in self.test_sentences:
_snake_case = tf.constant(lowerCAmelCase_ )
_snake_case = compiled_tokenizer(lowerCAmelCase_ )
_snake_case = tf_tokenizer(lowerCAmelCase_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_snake_case = ModelToSave(tokenizer=lowerCAmelCase_ )
_snake_case = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case = model.serving(lowerCAmelCase_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case = Path(lowerCAmelCase_ ) / 'saved.model'
tf.saved_model.save(lowerCAmelCase_ , lowerCAmelCase_ , signatures={'serving_default': model.serving} )
_snake_case = tf.saved_model.load(lowerCAmelCase_ )
_snake_case = loaded_model.signatures['serving_default'](lowerCAmelCase_ )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_snake_case = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case = tf_tokenizer(lowerCAmelCase_ ) # Build model with some sample inputs
_snake_case = tf_tokenizer.get_config()
_snake_case = TFGPTaTokenizer.from_config(lowerCAmelCase_ )
_snake_case = model_from_config(lowerCAmelCase_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case = tf_tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ )
_snake_case = out['input_ids'].numpy().shape[1]
assert out_length == max_length
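# A sketch of the parity the tests above assert, written with the public class
# names (`GPT2Tokenizer` / `TFGPT2Tokenizer`); requires `keras-nlp` and network
# access to download the "gpt2" checkpoint, and the dtype cast is defensive.
import tensorflow as tf
from transformers import GPT2Tokenizer
from transformers.models.gpt2 import TFGPT2Tokenizer

py_tok = GPT2Tokenizer.from_pretrained("gpt2")
tf_tok = TFGPT2Tokenizer.from_pretrained("gpt2")
text = ["This is a straightforward English test sentence."]
py_ids = py_tok(text, return_tensors="tf")["input_ids"]
tf_ids = tf_tok(text)["input_ids"].to_tensor()
assert bool(tf.reduce_all(tf.cast(py_ids, tf.int64) == tf.cast(tf_ids, tf.int64)))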
| 42 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase_ ( ):
lowerCamelCase_ , lowerCamelCase_ = 9, 1_4 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowerCamelCase__ )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
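# For reference, a compact Prim's algorithm over the same adjacency shape used
# above ({node: [[neighbour, cost], ...]}); a sketch, not the imported
# `prisms_algorithm` implementation itself.
import heapq

def prim_sketch(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, nxt) for nxt, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue  # this edge would close a cycle
        visited.add(to)
        mst_edges.append([frm, to, cost])
        for nxt, nxt_cost in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (nxt_cost, to, nxt))
    return mst_edges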
| 19 | 0 |
from random import randint, random
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 5 , ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = [[-1] * number_of_cells] # Create a highway without any cars
__UpperCamelCase :Optional[int] = 0
__UpperCamelCase :Any = max(SCREAMING_SNAKE_CASE , 0 )
while i < number_of_cells:
__UpperCamelCase :Union[str, Any] = (
randint(0 , SCREAMING_SNAKE_CASE ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Any = 0
__UpperCamelCase :Optional[int] = highway_now[car_index + 1 :]
for cell in range(len(SCREAMING_SNAKE_CASE ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# If we reach this point, the car is near the end of the highway, so wrap around to the start
return distance + get_distance(SCREAMING_SNAKE_CASE , -1 )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = len(SCREAMING_SNAKE_CASE )
# Before calculations, the highway is empty
__UpperCamelCase :List[str] = [-1] * number_of_cells
for car_index in range(SCREAMING_SNAKE_CASE ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__UpperCamelCase :Optional[int] = min(highway_now[car_index] + 1 , SCREAMING_SNAKE_CASE )
# Number of empty cells before the next car
__UpperCamelCase :Dict = get_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - 1
# We can't have the car causing an accident
__UpperCamelCase :Tuple = min(next_highway[car_index] , SCREAMING_SNAKE_CASE )
if random() < probability:
# Randomly, a driver will slow down
__UpperCamelCase :Optional[Any] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = len(highway[0] )
for i in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :int = update(highway[i] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = [-1] * number_of_cells
for car_index in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Dict = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__UpperCamelCase :Dict = (car_index + speed) % number_of_cells
# Commit the change of position
__UpperCamelCase :Union[str, Any] = speed
highway.append(SCREAMING_SNAKE_CASE )
return highway
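# One update step of the Nagel-Schreckenberg rules implemented above, written
# with descriptive names for readability; the function name here is
# illustrative, not one of the names defined in this file.
from random import random

def nasch_step(highway_row, max_speed, probability):
    n = len(highway_row)
    speeds = [-1] * n
    for i, v in enumerate(highway_row):
        if v == -1:
            continue
        v = min(v + 1, max_speed)  # 1. accelerate
        gap = next(d for d in range(1, n + 1) if highway_row[(i + d) % n] != -1) - 1
        v = min(v, gap)  # 2. brake to the gap ahead
        if random() < probability:
            v = max(v - 1, 0)  # 3. random slowdown
        speeds[i] = v
    nxt = [-1] * n
    for i, v in enumerate(speeds):
        if v != -1:
            nxt[(i + v) % n] = v  # 4. move (circular highway)
    return nxt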
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
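# What pad_to_multiple_of does in isolation: the padded sequence length is
# rounded up to the nearest multiple, which keeps tensor shapes friendly to
# fp16/fp8 tensor cores. A small sketch with the same tokenizer (needs network
# access for the checkpoint):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-cased")
#   batch = tok.pad(tok(["short", "a slightly longer sentence"]), pad_to_multiple_of=8, return_tensors="pt")
#   assert batch["input_ids"].shape[1] % 8 == 0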
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
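# Typical invocations (the script filename `tracking_example.py` is
# illustrative, not part of the repository):
#   accelerate launch tracking_example.py --with_tracking
#   accelerate launch tracking_example.py --mixed_precision fp16 --project_dir logs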
| 19 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
_UpperCamelCase : ClassVar[Features] = Features({"image": Image()} )
_UpperCamelCase : ClassVar[Features] = Features({"labels": ClassLabel} )
_UpperCamelCase : str = "image"
_UpperCamelCase : str = "labels"
def __A ( self , a__ ):
if self.label_column not in features:
raise ValueError(F"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , a__ ):
raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
_lowerCAmelCase : str = copy.deepcopy(self )
_lowerCAmelCase : Dict = self.label_schema.copy()
_lowerCAmelCase : str = features[self.label_column]
_lowerCAmelCase : Optional[Any] = label_schema
return task_template
@property
def __A ( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
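# What the alignment above boils down to, using the public `datasets` feature
# types; the label names in this sketch are illustrative.
from datasets import ClassLabel, Features, Image

dataset_features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# Aligning copies the dataset's own ClassLabel (with its names) into the
# template's label schema, replacing the bare placeholder declared above.
aligned_label_schema = Features({"labels": dataset_features["labels"]})
assert aligned_label_schema["labels"].names == ["cat", "dog"]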
| 44 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
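# How the language-code plumbing above behaves in practice; a sketch using the
# public class name, needs network access for the checkpoint.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
enc = tok("UN Chief Says There Is No Military Solution in Syria")
# set_src_lang_special_tokens above uses an empty prefix and an
# [eos, src_lang_code] suffix, so the encoded sequence ends with </s> en_XX.
assert enc["input_ids"][-1] == tok.convert_tokens_to_ids("en_XX")
assert enc["input_ids"][-2] == tok.eos_token_id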
| 19 | 0 |
"""simple docstring"""
import math
def is_prime ( number : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( ratio : float = 0.1 ) -> int:
primes = 3
j = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(i )
j += 2
return j
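# The corners the loop above enumerates: the ring with odd side length s has
# diagonal values s*s - k*(s - 1) for k = 0..3, and s*s itself is a perfect
# square (never prime), so only three corners per ring need a primality test.
# A small check of that identity:
s = 5
corners = [s * s - k * (s - 1) for k in range(4)]
assert corners == [25, 21, 17, 13]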
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
mocked_bulk.return_value = [(True, None)] * 30
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
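# The core faiss pattern the FaissIndex tests above exercise, stripped to the
# raw library API; a minimal sketch.
import faiss
import numpy as np

index = faiss.IndexFlatIP(5)                  # inner-product index over dim-5 vectors
index.add(np.eye(5, dtype=np.float32))        # one unit vector per row
scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 3)
assert ids.shape == (1, 3)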
| 19 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : bytes ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(SCREAMING_SNAKE_CASE )
lowerCAmelCase = """""".join(bin(SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for byte in data )
lowerCAmelCase = len(SCREAMING_SNAKE_CASE ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowerCAmelCase = B"""=""" * ((6 - len(SCREAMING_SNAKE_CASE ) % 6) // 2)
# Pad binary_stream with zeros to make its length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE ) % 6)
else:
lowerCAmelCase = B""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(SCREAMING_SNAKE_CASE ) , 6 ) ).encode()
+ padding
)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = (
"""argument should be a bytes-like object or ASCII string, """
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(SCREAMING_SNAKE_CASE )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters, then convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
try:
lowerCAmelCase = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowerCAmelCase = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowerCAmelCase = encoded_data[:-padding]
lowerCAmelCase = """""".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowerCAmelCase = """""".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )
lowerCAmelCase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(SCREAMING_SNAKE_CASE ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE )
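# Quick cross-check of the scheme above. Note that the two functions above
# ended up sharing one obfuscated name (the decoder shadows the encoder), so
# this sketch verifies the expected behaviour with the standard library instead.
import base64

data = b"Hello, base64!"
encoded = base64.b64encode(data)
assert encoded == b"SGVsbG8sIGJhc2U2NCE="
assert base64.b64decode(encoded) == data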
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
(
lowerCamelCase_ ,
lowerCamelCase_ ,
lowerCamelCase_ ,
lowerCamelCase_ ,
lowerCamelCase_ ,
lowerCamelCase_ ,
lowerCamelCase_ ,
) = config_and_inputs
lowerCamelCase_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> bool:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any:
lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 19 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gpu_offload = False
@property
def A ( self : int ) -> int:
'''simple docstring'''
return 32
@property
def A ( self : Dict ) -> List[Any]:
'''simple docstring'''
return 32
@property
def A ( self : int ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A ( self : str ) -> Optional[Any]:
'''simple docstring'''
return 8
@property
def A ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_a )
@property
def A ( self : Tuple ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_SCREAMING_SNAKE_CASE =PriorTransformer(**_a )
return model
@property
def A ( self : Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
_SCREAMING_SNAKE_CASE =ShapERenderer(**_a )
return model
def A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.dummy_prior
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_renderer
_SCREAMING_SNAKE_CASE =HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_a , clip_sample=_a , clip_sample_range=1.0 , )
_SCREAMING_SNAKE_CASE ={
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def A ( self : int , _a : Optional[Any] , _a : Any=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(_a ).startswith('mps' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def A ( self : int ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='cpu'
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_SCREAMING_SNAKE_CASE =np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Dict ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =torch_device == 'cpu'
_SCREAMING_SNAKE_CASE =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_a , relax_max_difference=_a , )
def A ( self : str ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
for key in inputs.keys():
if key in self.batch_params:
_SCREAMING_SNAKE_CASE =batch_size * [inputs[key]]
_SCREAMING_SNAKE_CASE =pipe(**_a , num_images_per_prompt=_a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def A ( self : str ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
_SCREAMING_SNAKE_CASE =ShapEPipeline.from_pretrained('openai/shap-e' )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(0 )
_SCREAMING_SNAKE_CASE =pipe(
'a shark' , generator=_a , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_a , _a )
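
# Usage sketch mirroring the slow test above (illustrative only; a CUDA device
# and the checkpoint download are assumed):
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#   images = pipe(
#       "a shark", guidance_scale=15.0, num_inference_steps=64,
#       frame_size=64, output_type="np",
#   ).images  # each entry is a (frames, H, W, 3) array, as asserted above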
| 47 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}


def digits_fifth_powers_sum(number):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution():
    return sum(
        number
        for number in range(1_0_0_0, 1_0_0_0_0_0_0)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
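    # Sanity check (illustrative, not part of the original solution): 4150 is a
    # fixed point of the digit-power map, since
    # 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
    assert digits_fifth_powers_sum(4_1_5_0) == 4_1_5_0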
| 19 | 0 |
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
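
# Illustrative checks (added for clarity, not part of the original script):
# matched pairs succeed; interleaved or unclosed brackets fail.
assert is_balanced("([]{})")
assert not is_balanced("([)]")
assert not is_balanced("(((")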
if __name__ == "__main__":
main()
| 48 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ = 1_9_2
lowerCamelCase_ = 7_6_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 3
lowerCamelCase_ = [8_0_0, 1_3_3_3]
lowerCamelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = 3_3_0
lowerCamelCase_ = 1_4
lowerCamelCase_ = 6
lowerCamelCase_ = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_5_3_6
lowerCamelCase_ = 1_2
lowerCamelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ = [8_0_0, 1_3_4_4]
lowerCamelCase_ = 9_1
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "coco-detection-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCamelCase__ ):
if "backbone" in name:
lowerCamelCase_ = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase_ = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase_ = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase_ = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase_ = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase_ = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
lowerCamelCase_ = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase_ = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCamelCase_ = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = outputs.logits, outputs.pred_boxes
lowerCamelCase_ , lowerCamelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowerCamelCase_ = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
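
# Example invocation (illustrative; the script name and the .pth path are
# placeholders, the flags are the ones declared above):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small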
| 19 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any):
'''simple docstring'''
pass
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = TFVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE)
__a = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim))
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a , __a = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE)
__a = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim))
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a , __a = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = {'''vision_model''': vision_model, '''text_model''': text_model}
__a = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE)
__a = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim))
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a , __a = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE)
__a = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)
__a = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = TFVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)
__a = after_output[0].numpy()
__a = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-5)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a , __a = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE)
__a = model(
input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE)
__a = output.vision_model_output.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = to_atuple(vision_model.config.image_size)
__a = to_atuple(vision_model.config.patch_size)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__a = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
__a = output.text_model_output.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float):
'''simple docstring'''
__a = np.abs((a - b)).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , F'Difference between torch and flax is {diff} (>= {tol}).')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
self.check_save_load(**__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a , __a = self.get_pretrained_model_and_inputs()
__a = model_a(**__SCREAMING_SNAKE_CASE)
__a = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = TFVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = model_a(**__SCREAMING_SNAKE_CASE)
__a = after_outputs[0].numpy()
__a = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''')
__a = 13
__a = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
__a = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
__a = random_attention_mask([batch_size, 4])
__a = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = TFViTModel(__SCREAMING_SNAKE_CASE , name='''vision_model''')
__a = TFBertModel(__SCREAMING_SNAKE_CASE , name='''text_model''')
return vision_model, text_model
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = TFViTModelTester(self)
__a = TFBertModelTester(self)
__a = vit_model_tester.prepare_config_and_inputs()
__a = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''')
__a = 13
__a = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
__a = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
__a = random_attention_mask([batch_size, 4])
__a = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a , __a = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE)
__a = model(
input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE)
__a = output.vision_model_output.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__a = to_atuple(vision_model.config.image_size)
__a = to_atuple(vision_model.config.patch_size)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__a = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
__a = output.text_model_output.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = TFDeiTModel(__SCREAMING_SNAKE_CASE , name='''vision_model''')
__a = TFRobertaModel(__SCREAMING_SNAKE_CASE , name='''text_model''')
return vision_model, text_model
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = TFDeiTModelTester(self)
__a = TFRobertaModelTester(self)
__a = vit_model_tester.prepare_config_and_inputs()
__a = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''')
__a = 13
__a = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
__a = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
__a = random_attention_mask([batch_size, 4])
__a = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = TFCLIPVisionModel(__SCREAMING_SNAKE_CASE , name='''vision_model''')
__a = TFBertModel(__SCREAMING_SNAKE_CASE , name='''text_model''')
return vision_model, text_model
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = TFCLIPVisionModelTester(self)
__a = TFBertModelTester(self)
__a = clip_model_tester.prepare_config_and_inputs()
__a = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _A ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=__SCREAMING_SNAKE_CASE)
__a = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''')
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
__a = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''np''')
__a = model(**__SCREAMING_SNAKE_CASE)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__a = np.array([[1.2_28_47_27, 0.3_10_41_22]])
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __SCREAMING_SNAKE_CASE , atol=1E-3))
| 49 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=1_0, r=5))
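# Sanity check (illustrative): C(10, 5) = 10! / (5! * 5!) = 252, so the call
# above prints 252. Updating each row right to left lets `c` hold one row of
# Pascal's triangle in place.
assert binomial_coefficient(n=1_0, r=5) == 252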
| 19 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
_UpperCAmelCase : List[Any] = parser.parse_args()
_UpperCAmelCase : str = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
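
# Example invocation (illustrative; the script name and paths are placeholders,
# the flags are the ones declared above):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./sd-v1-4.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type pndm \
#       --dump_path ./stable-diffusion-v1-4 \
#       --half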
| 50 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two base/power pairs from input and typecast them to int using the
    # map function. In each pair, x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
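# Worked example (illustrative): for the inputs "2,100" and "100,2",
# res(2, 100) = 100 * log10(2) ≈ 30.10 while res(100, 2) = 2 * log10(100) = 4,
# so the script reports 2 ^ 100 as the largest without computing either power.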
| 19 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"""Building PyTorch model from configuration: {config}""")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
snake_case_ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
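
# Example invocation (illustrative; the script name and paths are placeholders,
# the flags are the ones declared above):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./transfo-xl/model.ckpt \
#       --transfo_xl_config_file ./transfo-xl/config.json \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch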
| 51 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64 , metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20 , metadata={'help': 'The total number of n-best predictions to generate.'} )
    lang_id: int = field(
        default=0 , metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum):
    train = 'train'
    dev = 'dev'
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset" , None )
                self.examples = self.old_features.get("examples" , None )
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        " future run" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible} )
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
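
# Usage sketch (illustrative; the tokenizer checkpoint and data_dir are
# placeholders):
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = train_dataset[0]  # dict with input_ids / attention_mask / ...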
| 19 | 0 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
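# Response shape (an assumption based on the public ZenQuotes API description,
# not verified here): each endpoint returns a JSON list of quote objects, e.g.
#
#   [{"q": "The obstacle is the way.", "a": "Marcus Aurelius", "h": "<blockquote>...</blockquote>"}]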
| 52 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
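
# Illustrative subclass (not part of the original module): a concrete command
# registers its own sub-parser and implements run().
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           parser.add_parser("hello")
#
#       def run(self):
#           print("hello")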
| 19 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[int] =logging.get_logger(__name__)
a__ : int ={
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neox_japanese"

    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_2 , num_attention_heads=3_2 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_0_0_0_0 , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=3_1_9_9_6 , eos_token_id=3_1_9_9_9 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
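
# Minimal usage sketch (illustrative): the config behaves like any other
# PretrainedConfig subclass, so defaults can be overridden at construction
# time and the result serialized to disk.
#
#   config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=16)
#   config.save_pretrained("./gpt-neox-japanese-small")  # writes config.json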
| 53 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : Tuple = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCamelCase_ ( PretrainedConfig):
"""simple docstring"""
snake_case__ : Optional[int] = "time_series_transformer"
snake_case__ : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 3_2 , UpperCAmelCase__ : int = 3_2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 6_4 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 1_0_0 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Optional[int]=True , **UpperCAmelCase__ : Tuple , ) -> Dict:
# time series specific configuration
__SCREAMING_SNAKE_CASE = prediction_length
__SCREAMING_SNAKE_CASE = context_length or prediction_length
__SCREAMING_SNAKE_CASE = distribution_output
__SCREAMING_SNAKE_CASE = loss
__SCREAMING_SNAKE_CASE = input_size
__SCREAMING_SNAKE_CASE = num_time_features
__SCREAMING_SNAKE_CASE = lags_sequence
__SCREAMING_SNAKE_CASE = scaling
__SCREAMING_SNAKE_CASE = num_dynamic_real_features
__SCREAMING_SNAKE_CASE = num_static_real_features
__SCREAMING_SNAKE_CASE = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
__SCREAMING_SNAKE_CASE = cardinality
else:
__SCREAMING_SNAKE_CASE = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
__SCREAMING_SNAKE_CASE = embedding_dimension
else:
__SCREAMING_SNAKE_CASE = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__SCREAMING_SNAKE_CASE = num_parallel_samples
# Transformer architecture configuration
__SCREAMING_SNAKE_CASE = input_size * len(UpperCAmelCase__ ) + self._number_of_features
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = use_cache
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def UpperCAmelCase_ ( self : Any ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
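# Illustrative instantiation of this config, assuming its upstream public name
# TimeSeriesTransformerConfig and keyword arguments (as in transformers); the
# values below are toy numbers.
if __name__ == "__main__":
    from transformers import TimeSeriesTransformerConfig

    cfg = TimeSeriesTransformerConfig(
        prediction_length=24,
        context_length=48,
        num_time_features=2,
        num_static_categorical_features=1,
        cardinality=[366],
        lags_sequence=[1, 2, 3, 7],
    )
    # feature_size = input_size * len(lags_sequence) + _number_of_features
    #              = 1 * 4 + (50 + 0 + 2 + 0 + 1 * 2) = 58 with these settings
    print(cfg.feature_size, cfg.d_model)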
| 54 |
from collections import deque
def tarjan(g):
    """Return the strongly connected components of the directed graph `g`
    (given as an adjacency list) using Tarjan's algorithm."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
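    # Follow-up sketch: condense the graph into its DAG of strongly connected
    # components using the component indices returned above (illustrative only).
    components = tarjan(g)
    comp_id = [0] * n_vertices
    for i, comp in enumerate(components):
        for v in comp:
            comp_id[v] = i
    dag_edges = {(comp_id[u], comp_id[v]) for u, v in edges if comp_id[u] != comp_id[v]}
    print(dag_edges)  # edges between distinct components, duplicates removed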
| 19 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class snake_case ( AbstractArchiveFileSystem ):
"""simple docstring"""
_lowerCamelCase = ""
_lowerCamelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowerCamelCase = None # compression type in fsspec. ex: "gzip"
    _lowerCamelCase = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , UpperCamelCase = "" , UpperCamelCase = None , UpperCamelCase = None , **UpperCamelCase ):
"""simple docstring"""
super().__init__(self , **UpperCamelCase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase_ = fsspec.open(
UpperCamelCase , mode="rb" , protocol=UpperCamelCase , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase_ = os.path.basename(self.file.path.split("::" )[0] )
lowerCamelCase_ = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
lowerCamelCase_ = None
@classmethod
def snake_case ( cls , UpperCamelCase ):
"""simple docstring"""
# compressed file paths are always relative to the archive root
return super()._strip_protocol(UpperCamelCase ).lstrip("/" )
def snake_case ( self ):
"""simple docstring"""
if self.dir_cache is None:
lowerCamelCase_ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
lowerCamelCase_ = {f["name"]: f}
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.file.open().read()
def snake_case ( self , UpperCamelCase , UpperCamelCase = "rb" , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = self._strip_protocol(UpperCamelCase )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "bz2"
_lowerCamelCase = "bz2"
_lowerCamelCase = ".bz2"
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "gzip"
_lowerCamelCase = "gzip"
_lowerCamelCase = ".gz"
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "lz4"
_lowerCamelCase = "lz4"
_lowerCamelCase = ".lz4"
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "xz"
_lowerCamelCase = "xz"
_lowerCamelCase = ".xz"
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "zstd"
_lowerCamelCase = "zstd"
_lowerCamelCase = ".zst"
def __init__( self , UpperCamelCase , UpperCamelCase = "rb" , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = DEFAULT_BLOCK_SIZE , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(
fo=UpperCamelCase , mode=UpperCamelCase , target_protocol=UpperCamelCase , target_options=UpperCamelCase , block_size=UpperCamelCase , **UpperCamelCase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase_ = self.file.__enter__
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = file_
def __enter__( self ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
self._file.__exit__(*UpperCamelCase , **UpperCamelCase )
def __iter__( self ):
"""simple docstring"""
return iter(self._file )
def snake_case ( self ):
"""simple docstring"""
return next(self._file )
def __getattr__( self , UpperCamelCase ):
"""simple docstring"""
return getattr(self._file , UpperCamelCase )
def fixed_enter(*UpperCamelCase , **UpperCamelCase ):
return WrappedFile(_enter(*UpperCamelCase , **UpperCamelCase ) )
lowerCamelCase_ = fixed_enter
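# A minimal usage sketch, assuming these filesystems have been registered with
# fsspec under their `protocol` names (the `datasets` package does this at
# import time). The chained-URL form mirrors the placeholder example in the
# comment on the base class above; the URL itself is illustrative.
if __name__ == "__main__":
    import fsspec

    # Read a gzip-compressed file transparently; only mode "rb" is supported.
    with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", mode="rb") as f:
        data = f.read()
    print(len(data))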
| 55 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
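# The `_LazyModule` call above defers the heavy submodule imports until first
# attribute access. A minimal standalone sketch of the same idea follows; it is
# not the real transformers internals, and the names here are illustrative.
import importlib


class _LazyModuleSketch:
    def __init__(self, package, import_structure):
        self._package = package
        # Invert {submodule: [names]} into {name: submodule}.
        self._name_to_submodule = {
            name: sub for sub, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        # Import the owning submodule only when `name` is first requested.
        submodule = self._name_to_submodule[name]
        module = importlib.import_module(f".{submodule}", self._package)
        return getattr(module, name)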
| 19 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : List[str] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class a ( TokenizerTesterMixin , unittest.TestCase ):
snake_case_ = AlbertTokenizer
snake_case_ = AlbertTokenizerFast
snake_case_ = True
snake_case_ = True
snake_case_ = True
def A_ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = AlbertTokenizer(lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Dict , lowercase_ : Optional[int] ):
snake_case_ = '''this is a test'''
snake_case_ = '''this is a test'''
return input_text, output_text
def A_ ( self : Union[str, Any] ):
snake_case_ = '''<pad>'''
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def A_ ( self : Union[str, Any] ):
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(lowercase_ ) , 3_0000 )
def A_ ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def A_ ( self : List[str] ):
if not self.test_rust_tokenizer:
return
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = '''I was born in 92000, and this is falsé.'''
snake_case_ = tokenizer.tokenize(lowercase_ )
snake_case_ = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
snake_case_ = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowercase_ )
snake_case_ = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def A_ ( self : int ):
snake_case_ = AlbertTokenizer(lowercase_ , keep_accents=lowercase_ )
snake_case_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase_ , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [48, 25, 21, 1289] )
snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
snake_case_ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
snake_case_ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def A_ ( self : Optional[Any] ):
snake_case_ = AlbertTokenizer(lowercase_ )
snake_case_ = tokenizer.encode('''sequence builders''' )
snake_case_ = tokenizer.encode('''multi-sequence build''' )
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowercase_ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def A_ ( self : Optional[int] ):
# fmt: off
snake_case_ = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
| 56 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
A : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _UpperCamelCase ( Pipeline ):
'''simple docstring'''
def __init__( self , **__a ):
super().__init__(**__a )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self , __a , **__a ):
return super().__call__(__a , **__a )
def snake_case ( self , **__a ):
__lowerCAmelCase = {}
if "candidate_labels" in kwargs:
__lowerCAmelCase = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
__lowerCAmelCase = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def snake_case ( self , __a , __a=None , __a="This is a sound of {}." ):
if isinstance(__a , __a ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__lowerCAmelCase = requests.get(__a ).content
else:
with open(__a , "rb" ) as f:
__lowerCAmelCase = f.read()
if isinstance(__a , __a ):
__lowerCAmelCase = ffmpeg_read(__a , self.feature_extractor.sampling_rate )
if not isinstance(__a , np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
__lowerCAmelCase = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
__lowerCAmelCase = candidate_labels
        __lowerCAmelCase = [hypothesis_template.format(x ) for x in candidate_labels]
__lowerCAmelCase = self.tokenizer(__a , return_tensors=self.framework , padding=__a )
__lowerCAmelCase = [text_inputs]
return inputs
def snake_case ( self , __a ):
__lowerCAmelCase = model_inputs.pop("candidate_labels" )
__lowerCAmelCase = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , __a ):
__lowerCAmelCase = text_inputs[0]
else:
# Batching case.
__lowerCAmelCase = text_inputs[0][0]
__lowerCAmelCase = self.model(**__a , **__a )
__lowerCAmelCase = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def snake_case ( self , __a ):
__lowerCAmelCase = model_outputs.pop("candidate_labels" )
__lowerCAmelCase = model_outputs["logits"][0]
if self.framework == "pt":
__lowerCAmelCase = logits.softmax(dim=0 )
__lowerCAmelCase = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
__lowerCAmelCase = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(__a , __a ) , key=lambda x : -x[0] )
]
return result
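# Hedged usage sketch of the pipeline above through the `pipeline` factory; the
# CLAP checkpoint name and the audio file path are illustrative assumptions.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline(
        task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
    )
    preds = classifier(
        "dog_bark.wav",
        candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
    )
    print(preds)  # e.g. [{"score": 0.99, "label": "Sound of a dog"}, ...]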
| 57 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__A ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__A =concatenate_datasets
__A =DownloadConfig
__A =DownloadManager
__A =DownloadMode
__A =DownloadConfig
__A =DownloadMode
__A =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowercase_ = logging.get_logger(__name__)
lowercase_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
lowercase_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
lowercase_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
lowercase_ = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
lowercase_ = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
lowercase_ = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_MAPPING
lowercase_ = auto_class_update(FlaxAutoModel)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
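# Illustrative dispatch through the auto classes defined above, assuming the
# upstream public name FlaxAutoModelForSequenceClassification and a checkpoint
# that ships Flax weights; both are assumptions of this sketch.
if __name__ == "__main__":
    from transformers import FlaxAutoModelForSequenceClassification

    # The checkpoint's config type (BertConfig) selects
    # FlaxBertForSequenceClassification from the mapping above.
    model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
    print(type(model).__name__)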
| 58 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
__lowerCamelCase = logging.getLogger(__name__)
class UpperCAmelCase ( PretrainedConfig ):
A__ : int = "masked_bert"
def __init__(self : Dict , snake_case__ : Optional[Any]=3_05_22 , snake_case__ : int=7_68 , snake_case__ : Union[str, Any]=12 , snake_case__ : List[str]=12 , snake_case__ : Optional[Any]=30_72 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : str=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : List[str]=5_12 , snake_case__ : Tuple=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[Any]=1e-12 , snake_case__ : Optional[int]=0 , snake_case__ : Dict="topK" , snake_case__ : Any="constant" , snake_case__ : Tuple=0.0 , **snake_case__ : Dict , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
snake_case : Any = vocab_size
snake_case : int = hidden_size
snake_case : Dict = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : int = hidden_act
snake_case : Tuple = intermediate_size
snake_case : Any = hidden_dropout_prob
snake_case : Optional[Any] = attention_probs_dropout_prob
snake_case : Dict = max_position_embeddings
snake_case : Any = type_vocab_size
snake_case : List[Any] = initializer_range
snake_case : List[Any] = layer_norm_eps
snake_case : Tuple = pruning_method
snake_case : Tuple = mask_init
snake_case : Optional[int] = mask_scale
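# Illustrative instantiation: upstream this config is MaskedBertConfig from the
# movement-pruning research example, so the import path and keyword names below
# are assumptions of this sketch.
if __name__ == "__main__":
    from emmental import MaskedBertConfig  # hypothetical import path

    config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    print(config.model_type, config.pruning_method)  # masked_bert topK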
| 59 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 | 0 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[Any] = 0
@slow
def lowerCamelCase__ ( self : Optional[int] ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)
            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new
            # tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
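# --- Illustrative usage (not part of the test file above) ---------------------
# The registration pattern these tests exercise, shown in isolation. `MyConfig`
# and `MyTokenizer` are hypothetical names for this sketch; the registration
# API (`AutoConfig.register` / `AutoTokenizer.register`) is the one the tests use.
from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyTokenizer(BertTokenizer):
    # Reuse BERT's vocab handling; a real custom tokenizer would override more.
    pass


AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# From now on, AutoTokenizer.from_pretrained(...) resolves checkpoints whose
# config class is MyConfig to MyTokenizer.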
| 60 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
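# --- Illustrative usage (assumption: run outside a real SageMaker job) --------
# A quick way to see the environment probing above in action is to fake the
# SageMaker variables by hand; the payloads below are minimal stand-ins, not
# real SageMaker values.
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
# True only when the `smdistributed` package is also importable:
print(is_sagemaker_model_parallel_available())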
| 19 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
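# --- Illustrative usage (assumes a local XGLM sentencepiece.bpe.model file) ---
# A round trip showing the fairseq offset: every SentencePiece id is shifted by
# fairseq_offset (1) so ids 0-3 can mimic fairseq's <s>/<pad>/</s>/<unk> layout.
tokenizer = XGLMTokenizer("sentencepiece.bpe.model")
ids = tokenizer("Hello world")["input_ids"]
# ids[0] is the </s> separator prepended by build_inputs_with_special_tokens.
print(tokenizer.convert_ids_to_tokens(ids))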
| 61 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """
    Introspective sort: quicksort that falls back to heapsort past a
    recursion-depth budget and to insertion sort for small slices.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
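# --- Illustrative usage -------------------------------------------------------
# The hybrid keeps quicksort's average case while bounding the worst case via
# the heapsort fallback:
print(sort([21, 15, -7, 0, 99, 3]))  # [-7, 0, 3, 15, 21, 99]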
| 19 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
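# --- Illustrative usage (paths are examples, not part of the test suite) ------
# The fixture pattern above — write a default accelerate config once, then
# prefix every script with `accelerate launch --config_file ...` — also works
# outside the tests:
import os
import tempfile

from accelerate.utils import write_basic_config

config_path = os.path.join(tempfile.mkdtemp(), "default_config.yml")
write_basic_config(save_location=config_path)
# then, from a shell:
#   accelerate launch --config_file <config_path> examples/nlp_example.py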
| 62 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
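# --- Illustrative usage (trainer construction elided; kwargs are examples) ----
# The **gen_kwargs plumbing above is what lets callers tune generation at
# evaluation time without touching TrainingArguments:
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model, args=args, eval_examples=eval_examples,
#       post_process_function=post_processing_function, ...
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)  # stored in self._gen_kwargs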
| 19 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_a = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
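# --- Illustrative usage (network access assumed; output shown approximately) --
# The [CLS] ... [SEP] layout asserted in test_sequence_builders is ALBERT's
# standard single-sequence encoding:
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tok.decode(tok.encode("sequence builders")))  # roughly "[CLS] sequence builders [SEP]"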
| 63 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], List[np.ndarray]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            scaled_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(scaled_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            images_pred = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(images_pred)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
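# --- Note on the guidance step (arithmetic illustration only) -----------------
# The classifier-free guidance line in the denoising loop is linear
# extrapolation away from the unconditional prediction:
#   guided = uncond + guidance_scale * (cond - uncond)
# e.g. with guidance_scale = 3.0, cond = 1.0, uncond = 0.2:
#   0.2 + 3.0 * (1.0 - 0.2) = 2.6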
| 19 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
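# --- Illustrative usage -------------------------------------------------------
# Overriding a couple of fields shows the attribute_map indirection defined on
# the class above:
config = Speech2Text2Config(decoder_layers=4, d_model=128)
print(config.num_hidden_layers)  # 4   (set from decoder_layers in __init__)
print(config.hidden_size)        # 128 (routed to d_model via attribute_map)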
| 64 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
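# --- Illustrative usage -------------------------------------------------------
# The same adjacency construction works for any undirected weighted edge list;
# `mst` here is prisms_algorithm imported at the top of this file:
small = defaultdict(list)
for u, v, w in [[0, 1, 4], [1, 2, 8], [0, 2, 5]]:
    small[u].append([v, w])
    small[v].append([u, w])
print(mst(small))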
| 19 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class A ( tf.keras.Model ):
def __init__(self : Any , __UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = tokenizer
UpperCAmelCase__ = AutoConfig.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = TFAutoModel.from_config(__UpperCAmelCase )
def lowercase_ (self : int , __UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer(__UpperCAmelCase )
UpperCAmelCase__ = self.bert(**__UpperCAmelCase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
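

# --- Hedged usage sketch (not part of the original test file) ---
# The point of TFBertTokenizer is that tokenization runs inside the TF graph,
# so raw strings can flow straight through tf.data without separate Python
# preprocessing; a minimal pipeline, assuming the same checkpoints as above:
#
#     tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-cased")
#     dataset = tf.data.Dataset.from_tensor_slices(["first example", "second example"])
#     dataset = dataset.batch(2).map(tf_tokenizer)  # tokenization happens in-graph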
| 65 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 1_6):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=1_0_0,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
    training_function(config, args)
if __name__ == "__main__":
main()
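

# --- Hedged usage note (not part of the original script) ---
# This script is normally driven by the accelerate CLI, e.g.
# `accelerate launch this_script.py --with_tracking`; from a notebook the
# same entry point can be launched with accelerate's notebook_launcher:
#
#     from accelerate import notebook_launcher
#     notebook_launcher(training_function, (config, args), num_processes=2)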
| 19 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.0_2,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def lowerCAmelCase_(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
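

# --- Hedged usage sketch (not part of the original test file) ---
# A minimal forward pass through the backbone under test; the shapes follow
# the tester defaults above and are illustrative only.
#
#     config = MaskFormerSwinConfig(
#         image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1],
#         num_heads=[2, 2, 4], window_size=2,
#         out_features=["stage1", "stage2", "stage3"],
#     )
#     backbone = MaskFormerSwinBackbone(config)
#     feature_maps = backbone(torch.randn(1, 3, 32, 32)).feature_maps  # one map per stage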
| 66 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
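

# --- Hedged usage sketch (not part of the original module) ---
# src_lang/tgt_lang drive the language-code suffix tokens set up above:
#
#     tokenizer = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # input_ids end with [..., </s>, en_XX]; generation is then forced to start with ro_RO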
| 19 | 0 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
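
    # --- Hedged cross-check (not part of the original solution) ---
    # The table satisfies f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with
    # f(0) = 1 (condition on whether the left end holds a unit square or a
    # tile of length 2-4), which gives an independent way to validate the
    # result on small lengths:
    def ways_brute(n: int) -> int:
        if n == 0:
            return 1
        total = ways_brute(n - 1)
        for tile in (2, 3, 4):
            if n >= tile:
                total += ways_brute(n - tile)
        return total

    assert all(solution(n) == ways_brute(n) for n in range(12))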
| 67 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
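

# --- Hedged usage sketch (not part of the original tests) ---
# The same machinery on a real datasets.Dataset, assuming an "embeddings"
# column of float32 vectors and a matching float32 query vector:
#
#     ds = ds.add_faiss_index(column="embeddings")
#     scores, retrieved = ds.get_nearest_examples("embeddings", query_vector, k=5)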
| 19 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
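

# --- Hedged usage sketch (not part of the original module) ---
# Composing a full AltCLIP config from the two sub-configs defined above:
#
#     text_config = AltCLIPTextConfig()
#     vision_config = AltCLIPVisionConfig()
#     config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.text_config.project_dim == 768 and config.vision_config.image_size == 224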
| 68 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 19 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class UpperCamelCase :
    def __init__( self, keywords) -> None:
        self.adlist = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state( self, current_state, char) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self, keyword) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions( self) -> None:
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]['fail_state'] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]['fail_state'] = self.find_next_state(
                    state, self.adlist[child]['value'])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]['fail_state'] = 0
                self.adlist[child]['output'] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )
    def search_in( self, string) -> dict[str, list[int]]:
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
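# Usage sketch (illustrative, relying on the method names restored above):
# build the automaton once, then a single left-to-right pass over the text
# reports every keyword occurrence, instead of one scan per keyword.
_demo = UpperCamelCase(["what", "hat", "ver", "er"])
assert _demo.search_in("whatever, err ... , wherever") == {
    "what": [0],
    "hat": [1],
    "ver": [5, 25],
    "er": [6, 10, 22, 26],
}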
| 69 |
DIGITS_FIFTH_POWER ={str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum( number ):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution( ):
    return sum(
        number
        for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
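# Worked check (illustrative): 4150 is a fixed point of the digit-fifth-power
# map, since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150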
| 19 | 0 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A__ : Dict ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A__ : Tuple =concatenate_datasets
A__ : Dict =DownloadConfig
A__ : int =DownloadManager
A__ : Union[str, Any] =DownloadMode
A__ : Tuple =DownloadConfig
A__ : Optional[Any] =DownloadMode
A__ : str =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 70 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ = 1_9_2
lowerCamelCase_ = 7_6_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 3
lowerCamelCase_ = [8_0_0, 1_3_3_3]
lowerCamelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = 3_3_0
lowerCamelCase_ = 1_4
lowerCamelCase_ = 6
lowerCamelCase_ = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_5_3_6
lowerCamelCase_ = 1_2
lowerCamelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ = [8_0_0, 1_3_4_4]
lowerCamelCase_ = 9_1
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "coco-detection-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCamelCase__ ):
if "backbone" in name:
lowerCamelCase_ = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase_ = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase_ = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase_ = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase_ = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase_ = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
lowerCamelCase_ = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase_ = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCamelCase_ = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = outputs.logits, outputs.pred_boxes
lowerCamelCase_ , lowerCamelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowerCamelCase_ = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
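# Illustrative check of the qkv split performed above (upstream the helper is
# named read_in_q_k_v; this standalone sketch is not part of the conversion):
# timm stores query/key/value stacked row-wise, and slicing into three
# hidden_size blocks recovers them losslessly.
_hidden = 4
_qkv = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _qkv[:_hidden], _qkv[_hidden : 2 * _hidden], _qkv[-_hidden:]
assert torch.equal(torch.cat([_q, _k, _v]), _qkv)  # the three slices tile the original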
| 19 | 0 |
from math import factorial
def A ( a_ = 100 ) -> int:
    return sum(int(x ) for x in str(factorial(a_ ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
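# Worked check (illustrative): 10! = 3628800 and 3+6+2+8+8+0+0 = 27.
assert A(10) == 27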
| 71 |
def binomial_coefficient( n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
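# Why the inner loop runs backwards (illustrative note): c holds a single row
# of Pascal's triangle, and sweeping j from min(i, r) down to 1 lets the
# in-place update c[j] += c[j - 1] read the previous row's value, so only
# O(r) memory is needed. Quick checks:
assert binomial_coefficient(1_0, 5) == 252
assert binomial_coefficient(5, 2) == 10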
| 19 | 0 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ = logging.getLogger()
def _dump_articles( path : Path, articles : list ):
    '''simple docstring'''
    content : str = '''\n'''.join(articles )
    Path(path ).open('''w''' ).writelines(content )
lowerCAmelCase__ = '''patrickvonplaten/t5-tiny-random'''
lowerCAmelCase__ = '''sshleifer/bart-tiny-random'''
lowerCAmelCase__ = '''sshleifer/tiny-mbart'''
lowerCAmelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class __snake_case ( _lowercase):
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
_lowerCamelCase : Tuple = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
_lowerCamelCase : Optional[int] = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Any = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
_lowerCamelCase : List[str] = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
_lowerCamelCase : Optional[Any] = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
run_generate()
assert Path(__lowerCAmelCase ).exists()
# os.remove(Path(output_file_name))
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
self.run_eval_tester(__lowerCAmelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
self.run_eval_tester(__lowerCAmelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
_lowerCamelCase : List[str] = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
_lowerCamelCase : List[Any] = {
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
_lowerCamelCase : Dict = Path(self.get_auto_remove_tmp_dir() )
_lowerCamelCase : List[str] = str(tmp_dir / '''scores.json''' )
_lowerCamelCase : Union[str, Any] = str(tmp_dir / '''val.target''' )
_dump_articles(__lowerCAmelCase , text['''en'''] )
_dump_articles(__lowerCAmelCase , text['''de'''] )
_lowerCamelCase : Union[str, Any] = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
_lowerCamelCase : str = f'''
run_eval_search.py
{model}
{str(__lowerCAmelCase )}
{str(__lowerCAmelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
with CaptureStdout() as cs:
run_search()
_lowerCamelCase : Optional[Any] = [''' num_beams | length_penalty''', model, '''Best score args''']
_lowerCamelCase : Optional[Any] = ['''Info''']
if "translation" in task:
expected_strings.append('''bleu''' )
else:
expected_strings.extend(__lowerCAmelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(__lowerCAmelCase ).exists()
os.remove(Path(__lowerCAmelCase ) )
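# Minimal illustration of the _dump_articles helper above (hedged; uses a
# temporary directory rather than the test fixtures):
import tempfile
with tempfile.TemporaryDirectory() as _tmp:
    _src = Path(_tmp) / "articles.source"
    _dump_articles(_src, ["first article", "second article"])
    assert _src.read_text().splitlines() == ["first article", "second article"]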
| 72 |
import math
def res( x , y ):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0: # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1 # any number raised to 0 is 1
    raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
prompt ='''Enter the base and the power separated by a comma: '''
xa, ya =map(int, input(prompt).split(''','''))
xb, yb =map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
resa =res(xa, ya)
resb =res(xb, yb)
# We check for the largest number
if resa > resb:
    print('''Largest number is''', xa, '''^''', ya)
elif resb > resa:
    print('''Largest number is''', xb, '''^''', yb)
else:
    print('''Both are equal''')
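# Worked example (illustrative):
#     res(2, 50)  = 50 * log10(2) ≈ 15.05
#     res(10, 15) = 15 * log10(10) = 15.0
# so 2**50 > 10**15 is decided by comparing two small floats, without ever
# materialising either power.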
| 19 | 0 |
from math import isclose, sqrt
def next_point( point_x : float , point_y : float , incoming_gradient : float ) -> tuple[float, float, float]:
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution( first_x_coord : float = 1.4 , first_y_coord : float = -9.6 ) -> int:
    num_reflections : int = 0
    point_x : float = first_x_coord
    point_y : float = first_y_coord
    gradient : float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger =logging.get_logger(__name__)
MODEL_CONFIG_CLASSES =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
        default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
        lowerCamelCase_ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
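# Usage sketch (hedged: upstream these classes are SquadDataTrainingArguments,
# Split and SquadDataset; the checkpoint and data_dir below are illustrative):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     train_set = SquadDataset(args, tokenizer, mode=Split.train)
#     item = train_set[0]  # dict of input_ids / attention_mask / token_type_ids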
| 19 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowercase = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : int ,**A_ : Any ) -> Any:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
A = not kwargs.pop(A_ )
logger.warning(
F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('tpu_name' ,self.tpu_name )
A = kwargs.pop('device_idx' ,self.device_idx )
A = kwargs.pop('eager_mode' ,self.eager_mode )
A = kwargs.pop('use_xla' ,self.use_xla )
super().__init__(**A_ )
_lowerCamelCase: str = field(
default=_lowercase , metadata={'''help''': '''Name of TPU'''} , )
_lowerCamelCase: int = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
_lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Benchmark models in eager model.'''} )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' )
A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] ,'GPU' ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' )
return strategy
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
requires_backends(self ,['tf'] )
return self._setup_tpu is not None
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> "tf.distribute.Strategy":
requires_backends(self ,['tf'] )
return self._setup_strategy
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
requires_backends(self ,['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
requires_backends(self ,['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> bool:
        return self.n_gpu > 0
| 74 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
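# Subclass sketch (hedged: upstream this ABC is BaseTransformersCLICommand,
# whose register_subcommand(parser) adds a subparser and whose run() does the
# actual work). A concrete command would look roughly like:
#
#     class EnvCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             env_parser = parser.add_parser("env")
#             env_parser.set_defaults(func=lambda args: EnvCommand())
#         def run(self):
#             print("environment info goes here")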
| 19 | 0 |
'''simple docstring'''
from math import isqrt
def is_prime( number : int ) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution( max_prime : int = 10**6 ) -> int:
    """simple docstring"""
    primes_count =0
    cube_index =1
    prime_candidate =7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 75 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
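# Usage sketch (hedged: the checkpoint and prompts are illustrative, and the
# custom-pipeline name assumes the diffusers community "text_inpainting"
# pipeline this class corresponds to). CLIPSeg turns `text` into a mask, then
# the masked region is inpainted with `prompt`:
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-inpainting",
#         custom_pipeline="text_inpainting",
#         segmentation_model=seg_model,
#         segmentation_processor=seg_processor,
#     )
#     out = pipe(image=init_image, text="a glass", prompt="a cup of coffee")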
| 19 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE : int = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(a ) , a )
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Tuple = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
SCREAMING_SNAKE_CASE : int = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : List[Any] = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Any = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Any = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : int = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 3 , 4 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 4 , 1 , 5 )
SCREAMING_SNAKE_CASE : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 3 , 4 )
SCREAMING_SNAKE_CASE : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
SCREAMING_SNAKE_CASE : List[str] = np.random.randn(1 , 4 , 1 , 5 )
SCREAMING_SNAKE_CASE : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 3 , 4 )
SCREAMING_SNAKE_CASE : List[str] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 4 , 1 , 5 )
SCREAMING_SNAKE_CASE : str = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Dict = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(a )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
| 76 |
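# What the tests above exercise (illustrative): transformers.utils ships
# framework-agnostic array helpers that dispatch on the input's type, e.g.
#
#     from transformers.utils import transpose
#     transpose(np.zeros((3, 4))).shape    # (4, 3) -- numpy in, numpy out
#     transpose(torch.zeros(3, 4)).shape   # torch.Size([4, 3]) -- torch in, torch out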
from collections import deque
def tarjan( g ):
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph( n_vertices , edges ):
    g = [[] for _ in range(n_vertices )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
n_vertices =7
source =[0, 0, 1, 2, 3, 3, 4, 4, 6]
target =[1, 3, 2, 0, 1, 4, 5, 6, 5]
edges =[(u, v) for u, v in zip(source, target)]
g =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
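# Reading the output (illustrative): components are emitted in reverse
# topological order of the condensation, so the cycle {0, 1, 2, 3} comes
# last above. A self-contained 3-cycle collapses to a single component:
assert tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)])) == [[2, 1, 0]]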
| 19 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a , a , a="train" , a=None , a=None , a=None , a="" , ) -> Any:
super().__init__()
lowercase__ : Any = Path(a ).joinpath(type_path + '.source' )
lowercase__ : Union[str, Any] = Path(a ).joinpath(type_path + '.target' )
lowercase__ : List[str] = self.get_char_lens(self.src_file )
lowercase__ : Any = max_source_length
lowercase__ : str = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
lowercase__ : List[Any] = tokenizer
lowercase__ : Optional[Any] = prefix
if n_obs is not None:
lowercase__ : str = self.src_lens[:n_obs]
lowercase__ : Optional[Any] = src_lang
lowercase__ : Tuple = tgt_lang
def __len__( self ) -> List[Any]:
return len(self.src_lens )
def __getitem__( self , a ) -> Dict[str, torch.Tensor]:
lowercase__ : Optional[Any] = index + 1 # linecache starts at 1
lowercase__ : Tuple = self.prefix + linecache.getline(str(self.src_file ) , a ).rstrip('\n' )
lowercase__ : List[Any] = linecache.getline(str(self.tgt_file ) , a ).rstrip('\n' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase__ : str = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , a ) else self.tokenizer
)
lowercase__ : List[Any] = self.tokenizer.generator if isinstance(self.tokenizer , a ) else self.tokenizer
lowercase__ : Dict = encode_line(a , a , self.max_source_length , 'right' )
lowercase__ : Optional[int] = encode_line(a , a , self.max_target_length , 'right' )
lowercase__ : Dict = source_inputs['input_ids'].squeeze()
lowercase__ : Any = target_inputs['input_ids'].squeeze()
lowercase__ : Any = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _UpperCAmelCase ( a ) -> Optional[Any]:
return [len(a ) for x in Path(a ).open().readlines()]
def _UpperCAmelCase ( self , a ) -> Dict[str, torch.Tensor]:
lowercase__ : Union[str, Any] = torch.stack([x['input_ids'] for x in batch] )
lowercase__ : Dict = torch.stack([x['attention_mask'] for x in batch] )
lowercase__ : List[str] = torch.stack([x['decoder_input_ids'] for x in batch] )
lowercase__ : Optional[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
lowercase__ : Optional[int] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
lowercase__ : Optional[Any] = trim_batch(a , a )
lowercase__ , lowercase__ : Optional[int] = trim_batch(a , a , attention_mask=a )
lowercase__ : str = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_UpperCamelCase : Any = getLogger(__name__)
def a_ ( _lowerCAmelCase : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : int = get_git_info()
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'git_log.json' ) )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=4 , **_lowerCAmelCase : Dict ):
'''simple docstring'''
with open(_lowerCAmelCase , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase , indent=_lowerCAmelCase , **_lowerCAmelCase )
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
with open(_lowerCAmelCase ) as f:
return json.load(_lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = git.Repo(search_parent_directories=_lowerCAmelCase )
lowercase__ : Union[str, Any] = {
'repo_id': str(_lowerCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def a_ ( _lowerCAmelCase : Callable , _lowerCAmelCase : Iterable ):
'''simple docstring'''
return list(map(_lowerCAmelCase , _lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
with open(_lowerCAmelCase , 'wb' ) as f:
return pickle.dump(_lowerCAmelCase , _lowerCAmelCase )
def normalize_answer( _lowerCAmelCase : Any ):
'''simple docstring'''
def remove_articles(_lowerCAmelCase : Union[str, Any] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : int ):
lowercase__ : str = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = normalize_answer(_lowerCAmelCase ).split()
lowercase__ : Dict = normalize_answer(_lowerCAmelCase ).split()
lowercase__ : Tuple = Counter(_lowerCAmelCase ) & Counter(_lowerCAmelCase )
lowercase__ : Tuple = sum(common.values() )
if num_same == 0:
return 0
lowercase__ : List[str] = 1.0 * num_same / len(_lowerCAmelCase )
lowercase__ : List[Any] = 1.0 * num_same / len(_lowerCAmelCase )
lowercase__ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
'''simple docstring'''
return normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase )
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
lowercase__ : Dict = 0
for hypo, pred in zip(_lowerCAmelCase , _lowerCAmelCase ):
em += exact_match_score(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
em /= len(_lowerCAmelCase )
return {"em": em}
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase__ : int = 'dropout_rate'
for p in extra_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if not hasattr(_lowerCAmelCase , _lowerCAmelCase ) and not hasattr(_lowerCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowerCAmelCase ) )
delattr(_lowerCAmelCase , _lowerCAmelCase )
continue
lowercase__ : Any = p if hasattr(_lowerCAmelCase , _lowerCAmelCase ) else equivalent_param[p]
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
delattr(_lowerCAmelCase , _lowerCAmelCase )
return hparams, config
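# --- Usage sketch (added): worked values for the metric helpers above.
# normalize_answer("The Cat sat.")              -> "cat sat"
# f1_score("The cat sat", "a cat sat down")     -> 0.8
#   (2 shared tokens: precision = 2/2, recall = 2/3, F1 = 2*1*(2/3)/(1 + 2/3))
# exact_match_score("The cat!", "the cat")      -> True
# calculate_exact_match(["the cat"], ["a dog"]) -> {"em": 0.0}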
| 77 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
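# --- Illustrative sketch (added): the lazy-import pattern used above, reduced to
# its core. A module-level __getattr__ (PEP 562) defers the costly submodule
# import until a name is first touched. This is a minimal stand-in, not
# transformers' actual _LazyModule implementation; the mapping below is assumed.
import importlib

_LAZY_ATTRS = {"XGLMConfig": ".configuration_xglm"}  # attribute -> submodule

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")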
| 19 | 0 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc, hole_conc, intrinsic_conc):
    """Solve the mass-action law n * p = n_i**2 for the one argument passed as 0."""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
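# --- Usage sketch (added): the function solves the mass-action law
# n * p = n_i**2 for whichever of the three concentrations is passed as 0.
# carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   -> ('intrinsic_conc', 50.0)    # sqrt(25 * 100)
# carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
#   -> ('electron_conc', 25.0)     # 200**2 / 1600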
| 78 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''xmod'''
def __init__( self : Optional[int] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : int=768 , __UpperCAmelCase : Optional[Any]=12 , __UpperCAmelCase : Union[str, Any]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : str=2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : Optional[Any]=1E-12 , __UpperCAmelCase : Union[str, Any]=1 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : int="absolute" , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : str=False , __UpperCAmelCase : Any=2 , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : int=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Union[str, Any]=("en_XX",) , __UpperCAmelCase : List[Any]=None , **__UpperCAmelCase : int , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
_A = pre_norm
_A = adapter_reduction_factor
_A = adapter_layer_norm
_A = adapter_reuse_layer_norm
_A = ln_before_adapter
_A = list(__UpperCAmelCase )
_A = default_language
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_A = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
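# --- Usage sketch (added, names hedged): the classes above ship in transformers
# as XmodConfig / XmodOnnxConfig; this dump obfuscates the names, so the import
# below assumes the published API.
# from transformers import XmodConfig, XmodModel
# config = XmodConfig(num_hidden_layers=6, languages=("en_XX", "de_DE"))
# model = XmodModel(config)  # X-MOD attaches per-language adapters to each layer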
| 79 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__A ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__A =concatenate_datasets
__A =DownloadConfig
__A =DownloadManager
__A =DownloadMode
__A =DownloadConfig
__A =DownloadMode
__A =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
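# --- Usage sketch (added): the re-exports above make the typical entry points
# importable directly from the package root:
# from datasets import load_dataset, concatenate_datasets
# mrpc = load_dataset("glue", "mrpc")  # DatasetDict with train/validation/test
# combined = concatenate_datasets([mrpc["train"], mrpc["validation"]])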
| 19 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ ( a__ ):
__UpperCAmelCase = ['image_processor', 'tokenizer']
__UpperCAmelCase = 'ViltImageProcessor'
__UpperCAmelCase = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , a=None , a=None , **a ):
UpperCamelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
UpperCamelCase__ = kwargs.pop("feature_extractor" )
UpperCamelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
UpperCamelCase__ = self.image_processor
def __call__( self , a , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = None , a = False , a = False , a = False , a = False , a = True , a = None , **a , ):
UpperCamelCase__ = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel_values + pixel_mask
UpperCamelCase__ = self.image_processor(a , return_tensors=a )
encoding.update(a )
return encoding
def __a ( self , *a , **a ):
return self.tokenizer.batch_decode(*a , **a )
def __a ( self , *a , **a ):
return self.tokenizer.decode(*a , **a )
@property
def __a ( self ):
UpperCamelCase__ = self.tokenizer.model_input_names
UpperCamelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __a ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def __a ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
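# --- Usage sketch (added): the processor pairs a BERT tokenizer with the ViLT
# image processor. A typical VQA-style call (the checkpoint name is the published
# dandelin ViLT checkpoint, assumed here):
# from PIL import Image
# from transformers import ViltProcessor
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# enc = processor(images=Image.open("cats.png"), text="How many cats?", return_tensors="pt")
# # enc now holds input_ids / attention_mask plus pixel_values / pixel_mask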
| 80 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __A :
"""simple docstring"""
def __init__( self ) -> Tuple:
a ={}
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=1 ) -> Union[str, Any]:
if self.graph.get(__A ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
a =[[w, v]]
if not self.graph.get(__A ):
a =[]
def SCREAMING_SNAKE_CASE ( self ) -> int:
return list(self.graph )
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Optional[Any]:
if self.graph.get(__A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__A )
def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> List[Any]:
if s == d:
return []
a =[]
a =[]
if s == -2:
a =list(self.graph )[0]
stack.append(__A )
visited.append(__A )
a =s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
a =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
a =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__A ) != 0:
a =stack[len(__A ) - 1]
else:
a =ss
# check if se have reached the starting point
if len(__A ) == 0:
return visited
def SCREAMING_SNAKE_CASE ( self , __A=-1 ) -> int:
if c == -1:
a =floor(random() * 1_0000 ) + 10
for i in range(__A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
a =floor(random() * c ) + 1
if n != i:
self.add_pair(__A , __A , 1 )
def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> List[str]:
a =deque()
a =[]
if s == -2:
a =list(self.graph )[0]
d.append(__A )
visited.append(__A )
while d:
a =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
a =0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def SCREAMING_SNAKE_CASE ( self , __A ) -> Tuple:
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> List[Any]:
a =[]
a =[]
if s == -2:
a =list(self.graph )[0]
stack.append(__A )
visited.append(__A )
a =s
a =[]
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
a =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a =node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__A ) != 0:
a =stack[len(__A ) - 1]
else:
a =ss
# check if se have reached the starting point
if len(__A ) == 0:
return sorted_nodes
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =[]
a =[]
a =list(self.graph )[0]
stack.append(__A )
visited.append(__A )
a =-2
a =[]
a =s
a =False
a =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
a =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
a =len(__A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a =True
if len(__A ) != 0:
a =stack[len(__A ) - 1]
else:
a =False
indirect_parents.append(__A )
a =s
a =ss
# check if se have reached the starting point
if len(__A ) == 0:
return list(__A )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =[]
a =[]
a =list(self.graph )[0]
stack.append(__A )
visited.append(__A )
a =-2
a =[]
a =s
a =False
a =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
a =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
a =len(__A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a =True
if len(__A ) != 0:
a =stack[len(__A ) - 1]
else:
a =False
indirect_parents.append(__A )
a =s
a =ss
# check if se have reached the starting point
if len(__A ) == 0:
return False
def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> List[str]:
a =time()
self.dfs(__A , __A )
a =time()
return end - begin
def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> int:
a =time()
self.bfs(__A )
a =time()
return end - begin
class __A :
"""simple docstring"""
def __init__( self ) -> List[str]:
a ={}
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=1 ) -> Dict:
# check if the u exists
if self.graph.get(__A ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
a =[[w, v]]
# add the other way
if self.graph.get(__A ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
a =[[w, u]]
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Any:
if self.graph.get(__A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__A )
# the other way round
if self.graph.get(__A ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__A )
def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> int:
if s == d:
return []
a =[]
a =[]
if s == -2:
a =list(self.graph )[0]
stack.append(__A )
visited.append(__A )
a =s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
a =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
a =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__A ) != 0:
a =stack[len(__A ) - 1]
else:
a =ss
# check if se have reached the starting point
if len(__A ) == 0:
return visited
def SCREAMING_SNAKE_CASE ( self , __A=-1 ) -> List[Any]:
if c == -1:
a =floor(random() * 1_0000 ) + 10
for i in range(__A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
a =floor(random() * c ) + 1
if n != i:
self.add_pair(__A , __A , 1 )
def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> str:
a =deque()
a =[]
if s == -2:
a =list(self.graph )[0]
d.append(__A )
visited.append(__A )
while d:
a =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE ( self , __A ) -> str:
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =[]
a =[]
a =list(self.graph )[0]
stack.append(__A )
visited.append(__A )
a =-2
a =[]
a =s
a =False
a =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
a =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
a =len(__A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a =True
if len(__A ) != 0:
a =stack[len(__A ) - 1]
else:
a =False
indirect_parents.append(__A )
a =s
a =ss
# check if se have reached the starting point
if len(__A ) == 0:
return list(__A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =[]
a =[]
a =list(self.graph )[0]
stack.append(__A )
visited.append(__A )
a =-2
a =[]
a =s
a =False
a =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
a =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
a =len(__A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a =True
if len(__A ) != 0:
a =stack[len(__A ) - 1]
else:
a =False
indirect_parents.append(__A )
a =s
a =ss
# check if se have reached the starting point
if len(__A ) == 0:
return False
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return list(self.graph )
def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> Optional[int]:
a =time()
self.dfs(__A , __A )
a =time()
return end - begin
def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> Dict:
a =time()
self.bfs(__A )
a =time()
        return end - begin
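# --- Illustrative sketch (added): both classes above store a weighted graph as a
# dict mapping node -> list of [weight, neighbour] pairs. A minimal stand-alone
# BFS over the same storage scheme, with plain names:
from collections import deque as _deque

def _bfs(graph, start):
    """graph maps node -> list of [weight, neighbour] pairs, as in the classes above."""
    visited, order, queue = {start}, [], _deque([start])
    while queue:
        node = queue.popleft()
        order.append(node)
        for _weight, neighbour in graph.get(node, []):
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)
    return order

# _bfs({0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [], 3: []}, 0) -> [0, 1, 2, 3]
| 81 |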
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
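# --- Usage sketch (added): running the same 8M-parameter ESM-2 checkpoint the
# integration tests above use, end to end (downloads weights; the tokenizer
# class is resolved via AutoTokenizer):
# from transformers import AutoTokenizer, TFEsmForMaskedLM
# tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
# model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
# inputs = tokenizer("MKTA<mask>ISQ", return_tensors="tf")
# logits = model(**inputs).logits  # shape (1, seq_len, 33) - the ESM-2 vocab size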
| 19 | 0 |
def is_pentagonal(n: int) -> bool:
    """Return True if n is a pentagonal number, i.e. n = m(3m - 1)/2 for integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: smallest difference of two pentagonal numbers whose
    sum and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
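# --- Worked check (added): P(m) = m(3m - 1)/2, so solving 3m^2 - m - 2x = 0 gives
# m = (1 + sqrt(1 + 24x)) / 6; x is pentagonal iff that m is a positive integer,
# which is exactly what is_pentagonal tests.
# P(1..5) = 1, 5, 12, 22, 35:
# all(is_pentagonal(m * (3 * m - 1) // 2) for m in range(1, 6))  # True
# is_pentagonal(6)                                               # False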
| 82 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowerCamelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if not mpi_options.get("sagemaker_mpi_enabled" , lowerCamelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowerCamelCase_ = torch.device("cpu" )
lowerCamelCase_ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase_ = smp.local_rank()
lowerCamelCase_ = torch.device("cuda" , lowercase )
lowerCamelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase )
return device
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return False
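# --- Illustrative sketch (added): the detection helper above keys off two JSON
# environment variables set by SageMaker. A minimal repro of the parsing step:
# import json, os
# os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
# smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
# "partitions" in smp_options  # True -> model parallelism is configured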
| 19 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = KandinskyVaaPipeline
lowercase__ = [
"""image_embeds""",
"""negative_image_embeds""",
]
lowercase__ = ["""image_embeds""", """negative_image_embeds"""]
lowercase__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase__ = False
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Dict = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_UpperCamelCase : str = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Tuple = self.dummy_unet
_UpperCamelCase : List[Any] = self.dummy_movq
_UpperCamelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule='linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=lowerCamelCase__ ,)
_UpperCamelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str=0 ):
'''simple docstring'''
_UpperCamelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_UpperCamelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
_UpperCamelCase : str = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCamelCase : Tuple = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCamelCase : List[str] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[Any] = 'cpu'
_UpperCamelCase : Dict = self.get_dummy_components()
_UpperCamelCase : Optional[int] = self.pipeline_class(**lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
_UpperCamelCase : Dict = output.images
_UpperCamelCase : Dict = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) ,return_dict=lowerCamelCase__ ,)[0]
_UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
_UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : Any = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
_UpperCamelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
_UpperCamelCase : List[str] = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' ,torch_dtype=torch.floataa )
_UpperCamelCase : Union[str, Any] = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : int = 'red cat, 4k photo'
_UpperCamelCase : List[str] = torch.Generator(device='cuda' ).manual_seed(0 )
_UpperCamelCase , _UpperCamelCase : str = pipe_prior(
lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
_UpperCamelCase : Optional[int] = torch.Generator(device='cuda' ).manual_seed(0 )
_UpperCamelCase : Tuple = pipeline(
image_embeds=lowerCamelCase__ ,negative_image_embeds=lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=100 ,output_type='np' ,)
_UpperCamelCase : str = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
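# --- Usage sketch (added): the slow test above exercises the real two-stage
# Kandinsky 2.2 flow - the prior pipeline maps text to CLIP image embeddings and
# the decoder pipeline (the class under test) turns embeddings into pixels:
# prior = KandinskyVaaPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
# decoder = KandinskyVaaPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
# emb, neg_emb = prior("red cat, 4k photo", num_inference_steps=25).to_tuple()
# image = decoder(image_embeds=emb, negative_image_embeds=neg_emb).images[0]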
| 83 |
import math


def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
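# --- Design note (added): introsort starts as median-of-3 quicksort, decrements
# max_depth on every partition and falls back to heap_sort once the budget
# 2 * ceil(log2(n)) is spent (defeating quadratic adversarial inputs), and hands
# ranges at or below size_threshold (16) to insertion_sort, which is fastest on
# small, nearly-sorted slices.
# sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#   -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]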
| 19 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _SCREAMING_SNAKE_CASE ( A__ ):
pass
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> None:
lowerCAmelCase_ :Any = data
lowerCAmelCase_ :Node | None = None
def __iter__( self ) -> Dict:
lowerCAmelCase_ :int = self
lowerCAmelCase_ :Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(__A )
yield node.data
lowerCAmelCase_ :Tuple = node.next_node
@property
def __lowerCAmelCase ( self ) -> bool:
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCAmelCase = Node(1)
__UpperCAmelCase = Node(2)
__UpperCAmelCase = Node(3)
__UpperCAmelCase = Node(4)
print(root_node.has_loop) # False
__UpperCAmelCase = root_node.next_node
print(root_node.has_loop) # True
__UpperCAmelCase = Node(5)
__UpperCAmelCase = Node(6)
__UpperCAmelCase = Node(5)
__UpperCAmelCase = Node(6)
print(root_node.has_loop) # False
__UpperCAmelCase = Node(1)
print(root_node.has_loop) # False
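# --- Design note (added): the visited-list approach above costs O(n) extra memory
# and O(n^2) time (a linear membership scan per node). Floyd's tortoise-and-hare
# detects a loop in O(1) memory and O(n) time, a common alternative:
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:
            return True
    return False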
| 84 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> List[str]:
super().__init__(*lowercase , **lowercase )
lowerCamelCase_ = eval_examples
lowerCamelCase_ = post_process_function
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase=None , lowercase = None , lowercase = "eval" , **lowercase , ) -> Dict[str, float]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowerCamelCase_ = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowerCamelCase_ = gen_kwargs
lowerCamelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase_ = self.get_eval_dataloader(lowercase )
lowerCamelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
else:
lowerCamelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
return metrics
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ) -> Union[str, Any]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase , "predict" )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
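# --- Pattern note (added): both evaluate() and predict() above use the same
# trick - stash compute_metrics, run the bare evaluation loop without it, then
# post-process the generations and compute metrics once at the end. Skeleton:
# saved_compute_metrics = self.compute_metrics
# self.compute_metrics = None
# try:
#     output = eval_loop(dataloader, description="Evaluation")
# finally:
#     self.compute_metrics = saved_compute_metrics
# predictions = self.post_process_function(examples, dataset, output)
# metrics = saved_compute_metrics(predictions)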
| 19 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
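# A quick usage sketch of what the lazy structure above provides (assuming the
# usual transformers import layout): importing the config never pulls in torch
# or vision extras, while the torch-backed classes resolve only on first use.
#
#     from transformers import ConditionalDetrConfig
#
#     config = ConditionalDetrConfig()  # safe even without torch installed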
| 85 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
__A ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if latents is None:
lowerCamelCase_ = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase_ = latents.to(lowercase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = self.image_processor(lowercase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ = image.to(dtype=self.image_encoder.dtype , device=lowercase )
lowerCamelCase_ = self.image_encoder(lowercase )["last_hidden_state"]
lowerCamelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
if isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = 1
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ = len(lowercase )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}' )
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = batch_size * num_images_per_prompt
lowerCamelCase_ = guidance_scale > 1.0
lowerCamelCase_ = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.prior.config.num_embeddings
lowerCamelCase_ = self.prior.config.embedding_dim
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
lowerCamelCase_ = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
lowerCamelCase_ = []
        for i, latent in enumerate(lowercase ):
lowerCamelCase_ = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowercase )
lowerCamelCase_ = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
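# A minimal numeric sketch of the classifier-free-guidance step in the loop
# above: the unconditional and conditional predictions come out of one batched
# forward pass, then get extrapolated by guidance_scale (toy tensors below).
import torch

_noise_pred = torch.tensor([[0.2], [0.8]])  # [uncond, cond] stacked on dim 0
_uncond, _cond = _noise_pred.chunk(2)
_guided = _uncond + 3.0 * (_cond - _uncond)  # guidance_scale = 3.0
assert torch.allclose(_guided, torch.tensor([[2.0]]))  # 0.2 + 3.0 * 0.6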
| 19 | 0 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase = 1000 ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = 1, 1
__lowerCAmelCase : int = 2
while True:
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : List[str] = fa + fa
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = fa, f
index += 1
for _ in str(_UpperCamelCase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 86 |
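# A readable equivalent of the digit-count search above, since the mangled
# variable names obscure it; this assumes the intent is Project Euler 25:
# return the index of the first Fibonacci term that has `n` digits.
def first_fib_index_with_n_digits(n: int = 1000) -> int:
    fa, fb = 1, 1  # F(1), F(2)
    index = 2      # invariant: fb == F(index)
    while len(str(fb)) < n:
        fa, fb = fb, fa + fb
        index += 1
    return index

assert first_fib_index_with_n_digits(3) == 12  # F(12) = 144, first 3-digit term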
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase_ ( ):
lowerCamelCase_ , lowerCamelCase_ = 9, 1_4 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowerCamelCase__ )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
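# A minimal Prim's sketch with heapq, assuming the imported prisms_algorithm
# behaves like textbook Prim's over the `[neighbour, cost]` adjacency lists
# built above (function and variable names here are illustrative only).
import heapq

def prim_mst(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, nxt) for nxt, cost in adjacency[start]]
    heapq.heapify(heap)
    tree = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # edge would close a cycle
        visited.add(v)
        tree.append([u, v, cost])
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return tree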
| 19 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[Any] = "open-llama"
def __init__( self : Tuple , lowercase_ : str=10_00_00 , lowercase_ : Any=40_96 , lowercase_ : Dict=1_10_08 , lowercase_ : Union[str, Any]=32 , lowercase_ : int=32 , lowercase_ : Tuple="silu" , lowercase_ : Any=20_48 , lowercase_ : int=0.02 , lowercase_ : Any=1E-6 , lowercase_ : int=True , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[Any]=1 , lowercase_ : Any=2 , lowercase_ : Any=False , lowercase_ : int=True , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Any=None , **lowercase_ : Any , ) -> Union[str, Any]:
lowercase__ : Tuple = vocab_size
lowercase__ : Optional[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Any = intermediate_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : List[str] = hidden_act
lowercase__ : Any = initializer_range
lowercase__ : List[Any] = rms_norm_eps
lowercase__ : Optional[int] = use_cache
lowercase__ : Tuple = kwargs.pop(
"use_memorry_efficient_attention" , lowercase_ )
lowercase__ : Optional[Any] = hidden_dropout_prob
lowercase__ : str = attention_dropout_prob
lowercase__ : Tuple = use_stable_embedding
lowercase__ : Dict = shared_input_output_embedding
lowercase__ : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def __UpperCamelCase ( self : Dict ) -> int:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F'''got {self.rope_scaling}''' )
lowercase__ : Any = self.rope_scaling.get("type" , lowercase_ )
lowercase__ : Tuple = self.rope_scaling.get("factor" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
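# A standalone sketch mirroring _rope_scaling_validation above (names here are
# illustrative, not the real API): the dict needs exactly a `type` that is
# "linear" or "dynamic" plus a float `factor` strictly greater than 1.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`type` must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`factor` must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently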
| 87 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
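# Why pad_to_multiple_of matters in collate_fn above: fp16/bf16 tensor cores
# prefer sequence lengths that are multiples of 8 (16 for fp8). A tiny
# arithmetic sketch of the rounding the tokenizer performs (helper name is
# illustrative only):
import math

def padded_length(longest, multiple):
    return longest if multiple is None else math.ceil(longest / multiple) * multiple

assert padded_length(117, 8) == 120     # fp16 / bf16
assert padded_length(117, 16) == 128    # fp8
assert padded_length(117, None) == 117  # full precision: pad to longest only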
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
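# Typical invocation, using the standard `accelerate` CLI and the flags
# defined in main() above:
#
#     accelerate config                    # one-time interactive machine setup
#     accelerate launch this_script.py --mixed_precision fp16 --with_tracking
#
# With --with_tracking, metrics are logged to every tracker accelerate finds
# in the environment (TensorBoard, Weights & Biases, ...) under --project_dir.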
| 19 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : int=30 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : int=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : List[str]=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : List[Any]=0.02 , ) -> Dict:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ = (image_size // patch_size) ** 2
__magic_name__ = num_patches + 1
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> List[Any]:
"""simple docstring"""
__magic_name__ = FlaxViTModel(config=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__magic_name__ = (self.image_size, self.image_size)
__magic_name__ = (self.patch_size, self.patch_size)
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
__magic_name__ = self.type_sequence_label_size
__magic_name__ = FlaxViTForImageClassification(config=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = FlaxViTForImageClassification(UpperCamelCase__ )
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(UpperCamelCase__ )
def _lowercase ( self : int ) -> int:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _A , unittest.TestCase ):
'''simple docstring'''
a__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase ( self : Tuple ) -> None:
"""simple docstring"""
__magic_name__ = FlaxViTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self : int ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : Dict ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def _lowercase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = model_class(UpperCamelCase__ )
@jax.jit
def model_jitted(UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Dict ):
return model(pixel_values=UpperCamelCase__ , **UpperCamelCase__ )
with self.subTest("""JIT Enabled""" ):
__magic_name__ = model_jitted(**UpperCamelCase__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__magic_name__ = model_jitted(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__magic_name__ = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__magic_name__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(UpperCamelCase__ )
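# The sequence-length bookkeeping these tests rely on, worked through for the
# tester defaults above (image_size=30, patch_size=2) and for the standard
# google/vit-base-patch16-224 checkpoint used in the slow test:
assert (30 // 2) ** 2 + 1 == 226     # 15 * 15 patches + [CLS] token
assert (224 // 16) ** 2 + 1 == 197   # 14 * 14 patches + [CLS] for ViT-B/16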
| 88 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
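# A minimal usage sketch of the fast tokenizer above (real checkpoint name;
# downloading it requires network access the first time this runs):
from transformers import MBartTokenizerFast

_tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
_batch = _tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# Per set_src_lang_special_tokens above, the source sequence carries no prefix
# tokens and ends with </s> followed by the en_XX language-code token.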
| 19 | 0 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__lowerCAmelCase = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Any:
_a , _a : Union[str, Any] = create_model(
'HTSAT-tiny' , 'roberta' , lowerCAmelCase_ , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=lowerCAmelCase_ , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
_a : Optional[int] = {}
_a : Tuple = r'.*sequential.(\d+).*'
_a : int = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a : Union[str, Any] = key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
# replace sequential layers with list
_a : List[str] = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).group(1 )
_a : Optional[Any] = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(lowerCAmelCase_ )//3}.linear.""" )
elif re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : str = int(re.match(lowerCAmelCase_ , lowerCAmelCase_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_a : Optional[Any] = 1 if projecton_layer == 0 else 2
_a : int = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_a : str = value
_a : List[str] = mixed_qkv.size(0 ) // 3
_a : str = mixed_qkv[:qkv_dim]
_a : int = mixed_qkv[qkv_dim : qkv_dim * 2]
_a : Any = mixed_qkv[qkv_dim * 2 :]
_a : List[Any] = query_layer
_a : Union[str, Any] = key_layer
_a : Tuple = value_layer
else:
_a : Dict = value
return model_state_dict
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
_a , _a : Optional[Any] = init_clap(lowerCAmelCase_ , enable_fusion=lowerCAmelCase_ )
clap_model.eval()
_a : Tuple = clap_model.state_dict()
_a : Optional[int] = rename_state_dict(lowerCAmelCase_ )
_a : List[str] = ClapConfig()
_a : Tuple = enable_fusion
_a : int = ClapModel(lowerCAmelCase_ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
transformers_config.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
__lowerCAmelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
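# A standalone sketch of the fused-qkv split done in rename_state_dict above:
# the attention in_proj stacks query/key/value along dim 0, so each block is
# one third of the rows (toy sizes here; real dims come from the CLAP config).
import torch

_qkv_dim = 4
_mixed_qkv = torch.arange(3 * _qkv_dim * 2, dtype=torch.float32).reshape(3 * _qkv_dim, 2)
_query = _mixed_qkv[:_qkv_dim]
_key = _mixed_qkv[_qkv_dim : _qkv_dim * 2]
_value = _mixed_qkv[_qkv_dim * 2 :]
assert _query.shape == _key.shape == _value.shape == (_qkv_dim, 2)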
| 89 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(lowercase ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
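# A minimal raw-faiss sketch of what the FaissIndex wrapper above does under
# the hood (standard faiss API; needs `pip install faiss-cpu` to run):
import faiss
import numpy as np

_index = faiss.IndexFlatIP(5)                  # inner-product metric, dim 5
_index.add(np.eye(5, dtype=np.float32))        # five one-hot vectors
_scores, _ids = _index.search(np.ones((1, 5), dtype=np.float32), 3)
assert _ids.shape == (1, 3)                    # top-3 neighbours for one query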
| 19 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__lowerCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
__lowerCamelCase = value
else:
__lowerCamelCase = value
return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
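# Minimal sketch (illustrative, not part of the conversion script): PyTorch's
# fused in_proj stacks the query, key and value projections along dim 0, so
# the three 256-row slices above recover them exactly. Hidden size 256 is an
# assumption matching the checkpoints handled here.
def _split_in_proj_sketch():
    import torch

    hidden = 256
    fused_weight = torch.randn(3 * hidden, hidden)
    q_w = fused_weight[:hidden, :]
    k_w = fused_weight[hidden : 2 * hidden, :]
    v_w = fused_weight[-hidden:, :]
    # concatenating the slices reconstructs the fused matrix
    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_weight)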
def prepare_img():
    """Download the COCO image on which the conversion is verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into the HF Conditional DETR structure."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # move base-model weights under the "conditional_detr.model." prefix
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 90 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 19 | 0 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet, or None if it has too few tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-identifier characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
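# Quick illustrative check (not part of the pipeline): punctuation and
# whitespace are both split points, so only identifier-like tokens remain.
assert get_tokens("def foo(x): return x") == {"def", "foo", "x", "return"}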
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the MinHashLSH index; close matches are grouped into duplicate clusters."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the given data; the clusters are not deduplicated yet."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the exact Jaccard similarity of two code snippets' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
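# Minimal sketch (illustrative only, not part of the pipeline): the LSH index
# above works on MinHash signatures, whose `jaccard` estimate approximates the
# exact score computed by `jaccard_similarity`. The token lists are made up.
def _minhash_estimate_demo() -> None:
    tokens_a = [f"tok{i}" for i in range(20)]
    tokens_b = [f"tok{i}" for i in range(10, 30)]  # 10 of 30 distinct tokens shared
    mh_a, mh_b = get_min_hash(tokens_a), get_min_hash(tokens_b)
    if mh_a is not None and mh_b is not None:
        print(f"estimated {mh_a.jaccard(mh_b):.2f} vs exact {10 / 30:.2f}")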
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster so that every member is similar to at least one kept "extreme"."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared on each cluster, sharing the dataset via a global to avoid pickling it."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset and return the filtered dataset along with the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
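# Minimal usage sketch (illustrative; the column values are made up, but
# "content", "repo_name" and "path" are the columns this module expects):
#     from datasets import Dataset
#     ds = Dataset.from_dict({"content": [...], "repo_name": [...], "path": [...]})
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)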
| 91 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
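# Worked example (illustrative): 4150 is one of the numbers found here, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150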
if __name__ == "__main__":
print(solution())
| 19 | 0 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 92 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
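# Illustrative example (not part of the original script): the replacements are
# applied in sequence, so "blocks.0.attn.proj.weight" first becomes
# "encoder.layer.0.attn.proj.weight" and then
# "encoder.layer.0.attention.output.dense.weight".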
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
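    # Note (illustrative): each merges line such as "\u0120 l" declares a BPE
    # merge of that symbol pair into a single token; "\u0120" is the byte-level
    # marker for a leading space in the GPT-2/RoBERTa byte-pair encoding.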
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
| 93 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) by building one row of Pascal's triangle in place."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row, iterate j downwards
        # so that c[j - 1] still holds the previous row's value when it is added
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
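# Quick check (illustrative): C(10, 5) = 252, and Pascal's symmetry gives
# binomial_coefficient(10, 5) == binomial_coefficient(10, 10 - 5).
assert binomial_coefficient(10, 5) == 252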
| 19 | 0 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b using only doubling and addition (Russian peasant method)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, keeping intermediate sums reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
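# Illustrative checks (the function names above are descriptive choices, since
# the originals were lost):
assert binary_multiply(13, 11) == 13 * 11
assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7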
| 94 |
import math
def res(x: float, y: float) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x) to compare the
        # magnitudes of the powers without computing them.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
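# Minimal worked example (illustrative): 10 * math.log10(2) is approx. 3.01,
# while 2 * math.log10(1000) is 6.0, so the comparison correctly reports
# 2**10 = 1024 < 1000**2 = 1000000 without materializing either power.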
| 19 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
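        # Worked example (illustrative): with seq_length=7 and attention_window=4,
        # key_length = 6 and encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8, i.e.
        # the sequence is padded up to a multiple of the attention window.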
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
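# Illustrative note: with pad_token_id=1, input_ids [[5, 6, 1]] yields
# attention_mask [[1, 1, 0]] from the not_equal/cast above; the decoder mask
# additionally forces a 1 at the first (decoder start) position.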
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 95 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64 , metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0 , metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split( Enum ):
    train = 'train'
    dev = 'dev'
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    output_mode: str
    features: List[SquadFeatures]
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset" , None )
                self.examples = self.old_features.get("examples" , None )
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        " future run" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
    def __len__( self ):
        return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible} )
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
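    # A __getitem__ call therefore yields a dict that can be fed directly as
    # model(**inputs); start/end positions are attached only in Split.train mode, and
    # cls_index/p_mask only for the cloze-style models (xlnet/xlm).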
| 19 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase__ = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ):
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
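# Worked example (illustrative numbers, not from the library tests): for a 480x640 input
# with output_size=384, keep_aspect_ratio=True and multiple=32, scale_height = 0.8 and
# scale_width = 0.6; fitting the height sets both scales to 0.8, so the result is
# (constraint_to_multiple_of(384), constraint_to_multiple_of(512)) == (384, 512).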
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = False , lowercase = 1 , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[int] = size if size is not None else {'height': 384, 'width': 384}
_lowerCamelCase : Union[str, Any] = get_size_dict(lowercase )
_lowerCamelCase : Optional[Any] = do_resize
_lowerCamelCase : List[Any] = size
_lowerCamelCase : Tuple = keep_aspect_ratio
_lowerCamelCase : List[Any] = ensure_multiple_of
_lowerCamelCase : str = resample
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : int = do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self , lowercase , lowercase , lowercase = False , lowercase = 1 , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
_lowerCamelCase : str = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_lowerCamelCase : Optional[Any] = get_resize_output_image_size(
lowercase , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase , multiple=lowercase , )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[str] = size if size is not None else self.size
_lowerCamelCase : Tuple = get_size_dict(lowercase )
_lowerCamelCase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowerCamelCase : int = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowerCamelCase : int = resample if resample is not None else self.resample
_lowerCamelCase : int = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
_lowerCamelCase : List[Any] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Tuple = [to_numpy_array(lowercase ) for image in images]
if do_resize:
_lowerCamelCase : List[Any] = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : List[Any] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
_lowerCamelCase : List[str] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
_lowerCamelCase : Optional[Any] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Dict = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase ):
_lowerCamelCase : Union[str, Any] = target_sizes.numpy()
_lowerCamelCase : Union[str, Any] = []
for idx in range(len(lowercase ) ):
_lowerCamelCase : Tuple = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase )
_lowerCamelCase : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
_lowerCamelCase : Dict = logits.argmax(dim=1 )
_lowerCamelCase : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 96 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
| 19 | 0 |
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian( img , variance ) -> np.ndarray:
    '''simple docstring'''
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
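# vec_gaussian evaluates the zero-mean Gaussian pdf with the given variance elementwise:
# (1 / (sigma * sqrt(2*pi))) * exp(-x**2 / (2 * sigma**2)). It is reused below both for
# the spatial-distance kernel and for the intensity differences.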
def get_slice( img , x , y , kernel_size ) -> np.ndarray:
    '''simple docstring'''
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size , spatial_variance ) -> np.ndarray:
    '''simple docstring'''
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter( img , spatial_variance , intensity_variance , kernel_size , ) -> np.ndarray:
    '''simple docstring'''
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args( args ) -> tuple:
    '''simple docstring'''
    filename = args[1] if args[1:] else '''../image_data/lena.jpg'''
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
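# Usage sketch (illustrative arguments; assumes OpenCV can read the image path):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# Note that parse_args bumps an even kernel_size up to the next odd value so that the
# window has a well-defined center pixel.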
if __name__ == "__main__":
    filename , spatial_variance , intensity_variance , kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('''input image''', img)
    out = img / 255
    out = out.astype('''float32''')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('''output image''', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows() | 97 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer( self ):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens ,['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
    def test_save_pretrained( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCAmelCase__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowerCamelCase__ ,lowerCamelCase__ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ ,lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase__ ,legacy_format=lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ ,lowerCamelCase__ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ ,lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase__ ,legacy_format=lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ ,lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = "facebook/mbart-large-en-ro"
snake_case__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
snake_case__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
snake_case__ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer : MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name ,src_lang='en_XX' ,tgt_lang='ro_RO' )
        cls.pad_token_id = 1
return cls
def __lowerCAmelCase ( self : int ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] ,250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] ,250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] ,250_020 )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase__ )
def __lowerCAmelCase ( self : str ):
self.assertIn(lowerCamelCase__ ,self.tokenizer.all_special_ids )
UpperCAmelCase__ = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
UpperCAmelCase__ = self.tokenizer.decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ )
UpperCAmelCase__ = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] ,lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.tokenizer(lowerCamelCase__ ,max_length=lowerCamelCase__ ,truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) ,[250_026, 250_001] )
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = MBartTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase__ )
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase__ ,return_tensors='pt' )
UpperCAmelCase__ = shift_tokens_right(batch['labels'] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=len(self.expected_src_tokens ) ,return_tensors='pt' ,)
UpperCAmelCase__ = shift_tokens_right(batch['labels'] ,self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
UpperCAmelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase__ )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, EN_CODE] )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.tokenizer(self.src_text ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=3 ,return_tensors='pt' )
UpperCAmelCase__ = self.tokenizer(
text_target=self.tgt_text ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=10 ,return_tensors='pt' )
UpperCAmelCase__ = targets['input_ids']
UpperCAmelCase__ = shift_tokens_right(lowerCamelCase__ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.tokenizer._build_translation_inputs(
'A test' ,return_tensors='pt' ,src_lang='en_XX' ,tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) ,{
# A, test, EOS, en_XX
'input_ids': [[62, 3_034, 2, 250_004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250_001,
} ,)
| 98 |
from collections import deque
def tarjan( g ):
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
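# Tarjan's algorithm runs in O(V + E): every vertex is pushed and popped at most once
# and every edge is inspected once. Components are emitted in reverse topological
# order of the condensation (the DAG of strongly connected components).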
def create_graph( n , edges ):
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 19 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data ) -> tuple:
    return (data["data"], data["target"])
def xgboost( features , target , test_features ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main() -> None:
    data , target = data_handling(fetch_california_housing() )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F'Mean Absolute Error : {mean_absolute_error(y_test , predictions )}' )
    print(F'Mean Square Error : {mean_squared_error(y_test , predictions )}' )
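# Walk-through: main() fetches the California housing data, trains XGBRegressor on a
# 75% split and reports MAE/MSE on the held-out 25%; exact scores vary with the
# library version, so none are asserted here.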
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 99 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline ( Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs):
        super().__init__(*args , **kwargs)
        requires_backends(self , """vision""")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters( self , top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images , **kwargs):
        return super().__call__(images , **kwargs)
    def preprocess( self , image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        return model_inputs
    def _forward( self , model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess( self , model_outputs , top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores , ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1)[0]
            topk = tf.math.top_k(probs , k=top_k)
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
| 100 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_wavlm'''] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ :List[str] = logging.get_logger(__name__)
class GLPNImageProcessor ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self ,do_resize = True ,size_divisor = 32 ,resample=PILImageResampling.BILINEAR ,do_rescale = True ,**kwargs ,):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize( self ,image ,size_divisor ,resample ,data_format = None ,**kwargs):
        height , width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image ,(new_h, new_w) ,resample=resample ,data_format=data_format ,**kwargs)
        return image
    def rescale( self ,image ,scale ,data_format = None ,**kwargs):
        return rescale(image=image ,scale=scale ,data_format=data_format ,**kwargs)
    def preprocess( self ,images ,do_resize = None ,do_rescale = None ,size_divisor = None ,resample=None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('''Invalid image(s)''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image ,size_divisor=size_divisor ,resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image ,scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image ,data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data ,tensor_type=return_tensors)
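# Usage sketch (assumes Pillow is installed and a local image exists; the __call__ on
# BaseImageProcessor dispatches to preprocess):
#   from PIL import Image
#   processor = GLPNImageProcessor()
#   batch = processor(images=Image.open("room.jpg"), return_tensors="np")
#   batch["pixel_values"][0].shape  # channels-first, H and W rounded down to multiples of 32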
| 101 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19 | 0 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path : str , albert_config_file : str , pytorch_dump_path : str ) ->None:
    """simple docstring"""
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 102 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( t ):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t )
    h , m , s = t // 3600, (t // 60) % 60, t % 60
    return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def html_progress_bar( value , total , prefix , label , width=300 ):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table( items ):
    "Put the texts in `items` in an HTML table."
    html_code = '''<table border="1" class="dataframe">\n'''
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"""      <th>{i}</th>\n"""
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"""{elt:.6f}""" if isinstance(elt ,float ) else str(elt )
            html_code += f"""      <td>{elt}</td>\n"""
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar :
    warmup = 5
    update_every = 0.2
def __init__( self : Optional[Any] , A_ : int , A_ : Optional[str] = None , A_ : bool = True , A_ : Optional["NotebookTrainingTracker"] = None , A_ : int = 3_0_0 , ):
lowerCAmelCase_ : Union[str, Any] = total
lowerCAmelCase_ : int = '''''' if prefix is None else prefix
lowerCAmelCase_ : Optional[Any] = leave
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : Any = width
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : str = None
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : int , A_ : bool = False , A_ : str = None):
lowerCAmelCase_ : Dict = value
if comment is not None:
lowerCAmelCase_ : List[str] = comment
if self.last_value is None:
lowerCAmelCase_ : List[str] = time.time()
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : str = self.warmup
lowerCAmelCase_ : List[Any] = 1
self.update_bar(A_)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total):
if self.first_calls > 0:
self.first_calls -= 1
lowerCAmelCase_ : Tuple = time.time()
lowerCAmelCase_ : List[str] = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
lowerCAmelCase_ : int = self.elapsed_time / (value - self.start_value)
else:
lowerCAmelCase_ : str = None
if value >= self.total:
lowerCAmelCase_ : int = self.total
lowerCAmelCase_ : List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
lowerCAmelCase_ : List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(A_)
lowerCAmelCase_ : Tuple = value
lowerCAmelCase_ : Optional[int] = current_time
if self.average_time_per_item is None:
lowerCAmelCase_ : List[Any] = 1
else:
lowerCAmelCase_ : Any = max(int(self.update_every / self.average_time_per_item) , 1)
def UpperCAmelCase__ ( self : int , A_ : Optional[int] , A_ : List[str]=None):
lowerCAmelCase_ : str = ''' ''' * (len(str(self.total)) - len(str(A_))) + str(A_)
if self.elapsed_time is None:
lowerCAmelCase_ : str = F"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
lowerCAmelCase_ : Tuple = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"""
else:
lowerCAmelCase_ : Any = (
F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"""
F""" {format_time(self.predicted_remaining)}"""
)
self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment) == 0 else F""", {self.comment}]"""
self.display()
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
lowerCAmelCase_ : int = disp.display(disp.HTML(self.html_code) , display_id=A_)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCAmelCase__ ( self : Optional[int]):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(''''''))
class NotebookTrainingTracker ( NotebookProgressBar ):
def __init__( self : Union[str, Any] , A_ : Optional[int] , A_ : Dict=None):
super().__init__(A_)
lowerCAmelCase_ : int = None if column_names is None else [column_names]
lowerCAmelCase_ : Optional[int] = None
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : Optional[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
lowerCAmelCase_ : Optional[Any] = disp.display(disp.HTML(self.html_code) , display_id=A_)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCAmelCase__ ( self : List[str] , A_ : Optional[int]):
if self.inner_table is None:
lowerCAmelCase_ : List[Any] = [list(values.keys()), list(values.values())]
else:
lowerCAmelCase_ : List[Any] = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(A_)
lowerCAmelCase_ : str = columns
self.inner_table.append([values[c] for c in columns])
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Optional[Any] , A_ : Any=None , A_ : Optional[int]=3_0_0):
lowerCAmelCase_ : Any = NotebookProgressBar(A_ , prefix=A_ , parent=self , width=A_)
return self.child_bar
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : Dict = None
self.display()
class __snake_case ( UpperCamelCase_ ):
def __init__( self : Dict):
lowerCAmelCase_ : str = None
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Tuple = False
def UpperCAmelCase__ ( self : int , A_ : Optional[Any] , A_ : List[Any] , A_ : Union[str, Any] , **A_ : str):
lowerCAmelCase_ : str = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : List[str] = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''')
lowerCAmelCase_ : str = NotebookTrainingTracker(state.max_steps , A_)
def UpperCAmelCase__ ( self : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] , A_ : List[str] , **A_ : List[Any]):
lowerCAmelCase_ : int = int(state.epoch) if int(state.epoch) == state.epoch else F"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
lowerCAmelCase_ : Dict = False
def UpperCAmelCase__ ( self : Optional[Any] , A_ : List[str] , A_ : Optional[Any] , A_ : int , A_ : List[str]=None , **A_ : List[str]):
if not has_length(A_):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
lowerCAmelCase_ : int = self.training_tracker.add_child(len(A_))
else:
lowerCAmelCase_ : List[str] = NotebookProgressBar(len(A_))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[Any] , A_ : List[str] , A_ : Dict , **A_ : Tuple):
if self.prediction_bar is not None:
self.prediction_bar.close()
lowerCAmelCase_ : List[Any] = None
def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[Any] , A_ : str , A_ : Any , A_ : Tuple=None , **A_ : str):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
lowerCAmelCase_ : List[str] = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
lowerCAmelCase_ : int = state.global_step
self.training_tracker.write_line(A_)
def UpperCAmelCase__ ( self : int , A_ : Union[str, Any] , A_ : Tuple , A_ : Tuple , A_ : Optional[Any]=None , **A_ : Optional[Any]):
if self.training_tracker is not None:
lowerCAmelCase_ : Any = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history):
if "loss" in log:
lowerCAmelCase_ : Dict = log['''loss''']
break
if self.first_column == "Epoch":
lowerCAmelCase_ : str = int(state.epoch)
else:
lowerCAmelCase_ : Optional[int] = state.global_step
lowerCAmelCase_ : Optional[Any] = '''eval'''
for k in metrics:
if k.endswith('''_loss'''):
lowerCAmelCase_ : str = re.sub(r'''\_loss$''' , '''''' , A_)
lowerCAmelCase_ : Any = metrics.pop('''total_flos''' , A_)
lowerCAmelCase_ : List[Any] = metrics.pop('''epoch''' , A_)
lowerCAmelCase_ : Optional[Any] = metrics.pop(F"""{metric_key_prefix}_runtime""" , A_)
lowerCAmelCase_ : List[Any] = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , A_)
lowerCAmelCase_ : List[Any] = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , A_)
lowerCAmelCase_ : Tuple = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , A_)
for k, v in metrics.items():
if k == F"""{metric_key_prefix}_loss""":
lowerCAmelCase_ : Optional[int] = v
else:
lowerCAmelCase_ : str = k.split('''_''')
lowerCAmelCase_ : List[str] = ''' '''.join([part.capitalize() for part in splits[1:]])
lowerCAmelCase_ : List[Any] = v
self.training_tracker.write_line(A_)
self.training_tracker.remove_child()
lowerCAmelCase_ : Any = None
# Evaluation takes a long time so we should force the next update.
lowerCAmelCase_ : str = True
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Any , **A_ : int):
self.training_tracker.update(
state.global_step , comment=F"""Epoch {int(state.epoch)}/{state.num_train_epochs}""" , force_update=A_)
lowerCAmelCase_ : str = None
| 103 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowerCAmelCase__ = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
lowerCAmelCase__ = json.load(f)
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Tuple ):
return FSMTTokenizer.from_pretrained(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[int] ):
__lowercase = FSMTForConditionalGeneration.from_pretrained(lowercase__ ).to(lowercase__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['''en-ru''', 2_6.0],
['''ru-en''', 2_2.0],
['''en-de''', 2_2.0],
['''de-en''', 2_9.0],
] )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : List[str] ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
__lowercase = F"facebook/wmt19-{pair}"
__lowercase = self.get_tokenizer(lowercase__ )
__lowercase = self.get_model(lowercase__ )
__lowercase = bleu_data[pair]['''src''']
__lowercase = bleu_data[pair]['''tgt''']
__lowercase = tokenizer(lowercase__ ,return_tensors='''pt''' ,truncation=lowercase__ ,padding='''longest''' ).to(lowercase__ )
__lowercase = model.generate(
input_ids=batch.input_ids ,num_beams=8 ,)
__lowercase = tokenizer.batch_decode(
lowercase__ ,skip_special_tokens=lowercase__ ,clean_up_tokenization_spaces=lowercase__ )
__lowercase = calculate_bleu(lowercase__ ,lowercase__ )
print(lowercase__ )
self.assertGreaterEqual(scores['''bleu'''] ,lowercase__ )
| 104 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowerCamelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if not mpi_options.get("sagemaker_mpi_enabled" , lowerCamelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
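# For illustration only (hypothetical values): the check above succeeds when the
# SageMaker launcher exports, e.g.,
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'
# and the `smdistributed` package is importable in the training container.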
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`" )
if self.no_cuda:
lowerCamelCase_ = torch.device("cpu" )
lowerCamelCase_ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase_ = smp.local_rank()
lowerCamelCase_ = torch.device("cuda" , lowercase )
lowerCamelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase )
return device
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return False
| 19 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def _SCREAMING_SNAKE_CASE ( _lowercase : list ) ->int:
'''simple docstring'''
if not postfix_notation:
return 0
a : List[str] = {"+", "-", "*", "/"}
a : list[Any] = []
for token in postfix_notation:
if token in operations:
            b, a = stack.pop(), stack.pop()  # right operand first, then left
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_lowercase ) )
return stack.pop()
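# Worked example for the postfix evaluator above (shown as comments so module
# behavior is unchanged): ["2", "1", "+", "3", "*"] evaluates to (2 + 1) * 3 = 9.
# For mixed-sign division the branch above truncates toward zero: with
# ["-5", "2", "/"] we pop b = 2, a = -5; a * b < 0 and a % b != 0, so the code
# pushes a // b + 1 = -2 rather than Python's floored result -3.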
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 ):
lowerCamelCase_ = end or len(lowerCamelCase__ )
for i in range(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = i
lowerCamelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCamelCase_ = array[temp_index - 1]
temp_index -= 1
lowerCamelCase_ = temp_index_value
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Max Heap
lowerCamelCase_ = index
lowerCamelCase_ = 2 * index + 1 # Left Node
lowerCamelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCamelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCamelCase_ = right_index
if largest != index:
lowerCamelCase_ , lowerCamelCase_ = array[largest], array[index]
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
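# Example: heapify([1, 5, 3], 0, 3) swaps the root with its larger child and
# yields the max-heap [5, 1, 3] in place.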
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i in range(n - 1 , 0 , -1 ):
lowerCamelCase_ , lowerCamelCase_ = array[0], array[i]
heapify(lowerCamelCase__ , 0 , lowerCamelCase__ )
return array
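# Example: heap_sort([3, 1, 2]) first builds a max heap, then repeatedly moves
# the root to the end of the unsorted region and re-heapifies, producing
# [1, 2, 3]. The sort is in place; the array is also returned for convenience.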
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
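# Example for the median-of-three helper above: with array [9, 1, 5] and the
# three indices 0, 1, 2 it returns 5, the middle value. Pivoting on the median
# guards quicksort against already-sorted inputs.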
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = low
lowerCamelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCamelCase_ , lowerCamelCase_ = array[j], array[i]
i += 1
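# Hoare-style partition: the two indices move toward each other, elements on the
# wrong side of the pivot are swapped, and the crossing index is returned so the
# intro sort can split the range into two subranges.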
def lowerCamelCase_ ( lowerCamelCase__ ):
if len(lowerCamelCase__ ) == 0:
return array
lowerCamelCase_ = 2 * math.ceil(math.loga(len(lowerCamelCase__ ) ) )
lowerCamelCase_ = 1_6
return intro_sort(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ )
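# Depth-bound example: for a 16-element input, max_depth = 2 * ceil(log2(16)) = 8.
# Once the quicksort recursion exhausts this budget, the code falls back to heap
# sort, and ranges at or below the size threshold of 16 finish with insertion
# sort. This combination is what gives introsort its O(n log n) worst case.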
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCamelCase__ )
max_depth -= 1
lowerCamelCase_ = median_of_a(lowerCamelCase__ , lowerCamelCase__ , start + ((end - start) // 2) + 1 , end - 1 )
lowerCamelCase_ = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
intro_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = p
return insertion_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A =input('''Enter numbers separated by a comma : ''').strip()
__A =[float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 19 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = LEDConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self : Any ,lowercase_ : Optional[Any] ,lowercase_ : Optional[int]=1_3 ,lowercase_ : List[str]=7 ,lowercase_ : Any=True ,lowercase_ : Tuple=False ,lowercase_ : Optional[int]=9_9 ,lowercase_ : str=3_2 ,lowercase_ : Dict=2 ,lowercase_ : Dict=4 ,lowercase_ : int=3_7 ,lowercase_ : Optional[int]=0.1 ,lowercase_ : Tuple=0.1 ,lowercase_ : str=2_0 ,lowercase_ : Tuple=2 ,lowercase_ : Dict=1 ,lowercase_ : List[Any]=0 ,lowercase_ : List[str]=4 ,):
lowerCAmelCase__ : Tuple = parent
lowerCAmelCase__ : int = batch_size
lowerCAmelCase__ : str = seq_length
lowerCAmelCase__ : List[str] = is_training
lowerCAmelCase__ : List[str] = use_labels
lowerCAmelCase__ : str = vocab_size
lowerCAmelCase__ : Tuple = hidden_size
lowerCAmelCase__ : Dict = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = eos_token_id
lowerCAmelCase__ : Optional[int] = pad_token_id
lowerCAmelCase__ : Any = bos_token_id
lowerCAmelCase__ : Any = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCAmelCase__ : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCAmelCase__ : Optional[Any] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
lowerCAmelCase__ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
lowerCAmelCase__ : Dict = tf.concat([input_ids, eos_tensor] ,axis=1 )
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : int = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,attention_window=self.attention_window ,**self.config_updates ,)
lowerCAmelCase__ : List[Any] = prepare_led_inputs_dict(lowercase_ ,lowercase_ ,lowercase_ )
lowerCAmelCase__ : Any = tf.concat(
[tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] ,axis=-1 ,)
lowerCAmelCase__ : Optional[int] = global_attention_mask
return config, inputs_dict
def __lowerCAmelCase ( self : Any ,lowercase_ : str ,lowercase_ : Any ):
lowerCAmelCase__ : Optional[int] = TFLEDModel(config=lowercase_ ).get_decoder()
lowerCAmelCase__ : Tuple = inputs_dict['''input_ids''']
lowerCAmelCase__ : Optional[int] = input_ids[:1, :]
lowerCAmelCase__ : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase__ : str = 1
# first forward pass
lowerCAmelCase__ : List[Any] = model(lowercase_ ,attention_mask=lowercase_ ,use_cache=lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
        # append to next input_ids and attention mask
lowerCAmelCase__ : List[str] = tf.concat([input_ids, next_tokens] ,axis=-1 )
lowerCAmelCase__ : Optional[int] = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
lowerCAmelCase__ : int = model(lowercase_ ,attention_mask=lowercase_ )[0]
lowerCAmelCase__ : int = model(lowercase_ ,attention_mask=lowercase_ ,past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
lowerCAmelCase__ : Union[str, Any] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
lowerCAmelCase__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ ,lowercase_ ,rtol=1E-3 )
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_=None , A_=None , A_=None , A_=None , ):
if attention_mask is None:
lowerCAmelCase__ : Optional[Any] = tf.cast(tf.math.not_equal(A_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCAmelCase__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase__ : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowercase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowercase__ = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : int = TFLEDModelTester(self )
lowerCAmelCase__ : Union[str, Any] = ConfigTester(self ,config_class=lowercase_ )
def __lowerCAmelCase ( self : Dict ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
lowerCAmelCase__ : str = 2
lowerCAmelCase__ : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices ,1 ,inputs_dict['''global_attention_mask'''] ,)
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Dict = self.model_tester.seq_length
lowerCAmelCase__ : List[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase_ : List[str] ):
lowerCAmelCase__ : str = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,)
def check_encoder_attentions_output(lowercase_ : Optional[Any] ):
lowerCAmelCase__ : Optional[Any] = [t.numpy() for t in outputs.encoder_attentions]
lowerCAmelCase__ : str = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) ,self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,)
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] ,)
for model_class in self.all_model_classes:
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Dict = False
lowerCAmelCase__ : Optional[Any] = False
lowerCAmelCase__ : Tuple = model_class(lowercase_ )
lowerCAmelCase__ : List[str] = model(self._prepare_for_class(lowercase_ ,lowercase_ ) )
lowerCAmelCase__ : Dict = len(lowercase_ )
self.assertEqual(config.output_hidden_states ,lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
lowerCAmelCase__ : Tuple = model_class(lowercase_ )
lowerCAmelCase__ : Any = model(self._prepare_for_class(lowercase_ ,lowercase_ ) )
self.assertEqual(config.output_hidden_states ,lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Optional[int] = model_class(lowercase_ )
lowerCAmelCase__ : List[Any] = model(self._prepare_for_class(lowercase_ ,lowercase_ ) )
self.assertEqual(config.output_hidden_states ,lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : str = model_class(lowercase_ )
lowerCAmelCase__ : str = model(self._prepare_for_class(lowercase_ ,lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) ,len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states ,lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __lowerCAmelCase ( self : Dict ):
pass
def __lowerCAmelCase ( self : Dict ):
        # TODO: Head-masking not yet implemented
pass
def __SCREAMING_SNAKE_CASE ( A_ ):
return tf.constant(A_ , dtype=tf.intaa )
__UpperCamelCase : Any = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Tuple = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
lowerCAmelCase__ : Tuple = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase__ : Any = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase__ : str = prepare_led_inputs_dict(model.config ,lowercase_ ,lowercase_ )
lowerCAmelCase__ : str = model(**lowercase_ )[0]
lowerCAmelCase__ : str = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape ,lowercase_ )
# change to expected output here
lowerCAmelCase__ : List[str] = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] ,)
tf.debugging.assert_near(output[:, :3, :3] ,lowercase_ ,atol=1E-3 )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
lowerCAmelCase__ : List[str] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase__ : Dict = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase__ : Any = prepare_led_inputs_dict(model.config ,lowercase_ ,lowercase_ )
lowerCAmelCase__ : List[str] = model(**lowercase_ )[0]
lowerCAmelCase__ : Any = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape ,lowercase_ )
# change to expected output here
lowerCAmelCase__ : int = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] ,)
tf.debugging.assert_near(output[:, :3, :3] ,lowercase_ ,atol=1E-3 ,rtol=1E-3 )
| 106 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> List[str]:
super().__init__(*lowercase , **lowercase )
lowerCamelCase_ = eval_examples
lowerCamelCase_ = post_process_function
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase=None , lowercase = None , lowercase = "eval" , **lowercase , ) -> Dict[str, float]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowerCamelCase_ = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowerCamelCase_ = gen_kwargs
lowerCamelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase_ = self.get_eval_dataloader(lowercase )
lowerCamelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
else:
lowerCamelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
return metrics
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ) -> Union[str, Any]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase , "predict" )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
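# Usage sketch (hedged: assumes an instance `trainer` of the seq2seq trainer
# subclass above, constructed with `post_process_function` and `compute_metrics`):
#
#     metrics = trainer.evaluate(max_length=128, num_beams=4)
#     test_output = trainer.predict(test_dataset, test_examples, max_length=128)
#
# Extra keyword arguments on both calls are forwarded as generation kwargs.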
| 19 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=13 , __lowerCamelCase : Tuple=10 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : Any=5 , __lowerCamelCase : str=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Dict=10 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : Optional[int]="divided_space_time" , __lowerCamelCase : Tuple=None , ) -> Optional[int]:
a = parent
a = batch_size
a = image_size
a = num_channels
a = patch_size
a = num_frames
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = attention_type
a = initializer_range
a = scope
a = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
a = (image_size // patch_size) ** 2
a = (num_frames) * self.num_patches_per_frame + 1
def __UpperCAmelCase ( self : Dict ) -> Dict:
a = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Any ) -> List[str]:
a = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
a = self.num_labels
return config
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
a = TimesformerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict ) -> Tuple:
a = TimesformerForVideoClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
# verify the logits shape
a = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : str = False
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = TimesformerModelTester(self )
a = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=False ) -> Any:
a = copy.deepcopy(__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def __UpperCAmelCase ( self : List[Any] ) -> int:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def __UpperCAmelCase ( self : int ) -> Tuple:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> str:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : int ) -> List[str]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TimesformerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if not self.has_attentions:
pass
else:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
for model_class in self.all_model_classes:
a = self.model_tester.seq_length
a = self.model_tester.num_frames
a = True
a = False
a = True
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a = True
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
a = len(__lowerCamelCase )
# Check attention is always last and order is fine
a = True
a = True
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCamelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Any ):
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.hidden_states
a = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
a = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __magic_name__ ( ):
'''simple docstring'''
a = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" )
a = np.load(A )
return list(A )
@require_torch
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self : int ) -> List[Any]:
a = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
__lowerCamelCase )
a = self.default_image_processor
a = prepare_video()
a = image_processor(video[:8] , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCamelCase )
# verify the logits
a = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 107 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
__A ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if latents is None:
lowerCamelCase_ = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase_ = latents.to(lowercase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = self.image_processor(lowercase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ = image.to(dtype=self.image_encoder.dtype , device=lowercase )
lowerCamelCase_ = self.image_encoder(lowercase )["last_hidden_state"]
lowerCamelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
    def __call__( self , image , num_images_per_prompt: int = 1 , num_inference_steps: int = 25 , generator = None , latents = None , guidance_scale: float = 4.0 , frame_size: int = 64 , output_type: str = "pil" , return_dict: bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}' )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 )  # batch_size, num_embeddings, embedding_dim
            # `do_classifier_free_guidance` is a bool (guidance_scale > 1.0), so test truthiness, not `is not None`
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
lowerCamelCase_ = []
for i, latent in enumerate(lowercase ):
print()
lowerCamelCase_ = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowercase )
lowerCamelCase_ = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
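# A minimal sketch of the classifier-free-guidance update used in the denoising
# loop above, isolated for clarity (the helper name is illustrative):
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The batch holds the [unconditional, conditional] halves that were
    # concatenated when the image embeddings were prepared.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)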
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation supporting range assignment."""

    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx):
        """Index of the left child of node ``idx``."""
        return idx * 2

    def right(self, idx):
        """Index of the right child of node ``idx``."""
        return idx * 2 + 1
    def build(self, idx, left_element, right_element, a):
        """Build the tree over a[left_element - 1 : right_element] (1-indexed bounds)."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
    def update(self, idx, left_element, right_element, a, b, val):
        """Assign ``val`` to every position in [a, b], pushing lazy values down as needed."""
        if self.flag[idx]:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True
    def query(self, idx, left_element, right_element, a, b):
        """Return the maximum on [a, b], pushing pending lazy values down first."""
        if self.flag[idx]:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)
    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
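# Hypothetical cross-check (not part of the original file): mirror the range
# assignment on a plain Python list and confirm range-max queries agree.
def _naive_check():
    data = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    tree = SegmentTree(len(data))
    tree.build(1, 1, len(data), data)
    assert tree.query(1, 1, len(data), 4, 6) == max(data[3:6])
    data[0:3] = [111] * 3  # mirrors update(1, 1, size, 1, 3, 111)
    tree.update(1, 1, len(data), 1, 3, 111)
    assert tree.query(1, 1, len(data), 1, len(data)) == max(data)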
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 1_4  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
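# For reference, a minimal heap-based Prim's sketch (an illustration, not the
# `mst` function imported above; it consumes the same adjacency structure):
import heapq

def prim(adjacency, start=0):
    visited, tree_edges = {start}, []
    heap = [(cost, start, node) for node, cost in adjacency[start]]
    heapq.heapify(heap)
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v, cost))
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return tree_edges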
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that build between 1 and n_limit laminae."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def training_function(config , args ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
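# Minimal sketch of the tracking API exercised above (assumes at least one
# tracker backend such as tensorboard is installed; the run name is illustrative):
from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")
accelerator.init_trackers("demo_run", config={"lr": 2e-5})
accelerator.log({"train_loss": 0.5}, step=0)
accelerator.end_training()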
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run unconditional DDIM sampling and return the generated images."""
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
# set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
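# Hedged usage sketch for the unconditional DDIM pipeline above; the checkpoint
# id is illustrative (any DDPM/DDIM-compatible UNet checkpoint should work):
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")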
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class MBartTokenizerFast(PreTrainedTokenizerFast):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
    def _switch_to_input_mode( self ) -> None:
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> None:
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
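# Illustrative round-trip with the fast MBart tokenizer defined above (the
# model id comes from the file's own pretrained map; the sentence is arbitrary):
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# Source sequences end with </s> followed by the source language code.
assert batch["input_ids"][0, -1].item() == tok.lang_code_to_id["en_XX"]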
'''simple docstring'''
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in ``n`` with the greatest product."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 1_3] ) )
        for i in range(len(n) - 1_2 ) )
if __name__ == "__main__":
    print(F'''{solution() = }''')
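# Equivalent, arguably clearer formulation using math.prod instead of reduce
# (an alternative sketch, not the original author's version):
from math import prod

def solution_prod(n: str = N, window: int = 13) -> int:
    return max(prod(int(digit) for digit in n[i : i + window])
               for i in range(len(n) - window + 1))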
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class FaissIndexTest(TestCase):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
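# Minimal standalone sketch of the FaissIndex flow exercised above (assumes
# `faiss` is installed; the vectors are illustrative):
import numpy as np
from datasets.search import FaissIndex

index = FaissIndex(string_factory="Flat")
index.add_vectors(np.eye(5, dtype=np.float32))
scores, ids = index.search(np.ones(5, dtype=np.float32), k=3)
print(ids)  # ids of the stored vectors nearest to the query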
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
"""simple docstring"""
UpperCamelCase__ : Any ="""MCTCTFeatureExtractor"""
UpperCamelCase__ : Union[str, Any] ="""AutoTokenizer"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        """Forward audio to the feature extractor and text to the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def pad( self , *args , **kwargs ):
        """Pad audio features via the feature extractor and labels via the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('input_features' , None )
        labels = kwargs.pop('labels' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        """Temporarily set the tokenizer as the current processor for label encoding."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
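# Hedged usage sketch for the processor above (the checkpoint id is the public
# M-CTC-T model; the silent audio input is purely illustrative):
import numpy as np
from transformers import MCTCTProcessor

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
inputs = processor(audio=np.zeros(16000, dtype=np.float32), sampling_rate=16000,
                   text="hello world", return_tensors="pt")
print(inputs.keys())  # input_features from the feature extractor plus labels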
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any:
lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
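# Compact standalone version of the integration check above (greedy decoding
# should reproduce the token ids listed in the test):
import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tok = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
out = model.generate(tok("the president is", return_tensors="pt").input_ids, do_sample=False)
print(tok.decode(out[0]))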
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
a_ : Any = "bert-base-cased"
a_ : Dict = "google/pegasus-xsum"
a_ : Any = [" Sam ate lunch today.", "Sams lunch ingredients."]
a_ : Optional[int] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
a_ : Any = "patrickvonplaten/t5-tiny-random"
a_ : Tuple = "sshleifer/bart-tiny-random"
a_ : str = "sshleifer/tiny-mbart"
a_ : List[str] = "sshleifer/tiny-marian-en-de"
def _A (lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int ) -> Optional[int]:
'''simple docstring'''
_a = '\n'.join(lowerCamelCase__ )
Path(lowerCamelCase__ ).open('w' ).writelines(lowerCamelCase__ )
def _A (lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCamelCase__ , f'{split}.source' ) , lowerCamelCase__ )
_dump_articles(os.path.join(lowerCamelCase__ , f'{split}.target' ) , lowerCamelCase__ )
return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self , __magic_name__ ) -> List[Any]:
_a = AutoTokenizer.from_pretrained(__magic_name__ )
_a = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_a = max(len(tokenizer.encode(__magic_name__ ) ) for a in ARTICLES )
_a = max(len(tokenizer.encode(__magic_name__ ) ) for a in SUMMARIES )
_a = 4
_a = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
_a = SeqaSeqDataset(
__magic_name__ , data_dir=__magic_name__ , type_path='train' , max_source_length=__magic_name__ , max_target_length=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ , )
_a = DataLoader(__magic_name__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__magic_name__ , __magic_name__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_a = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self , __magic_name__ ) -> str:
_a = AutoTokenizer.from_pretrained(__magic_name__ )
_a = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_a = max(len(tokenizer.encode(__magic_name__ ) ) for a in ARTICLES )
_a = max(len(tokenizer.encode(__magic_name__ ) ) for a in SUMMARIES )
_a = 4
_a = LegacySeqaSeqDataset(
__magic_name__ , data_dir=__magic_name__ , type_path='train' , max_source_length=20 , max_target_length=__magic_name__ , )
_a = DataLoader(__magic_name__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self ) -> str:
_a = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_a = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_a = tmp_dir.joinpath('train.source' ).open().readlines()
_a = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__magic_name__ , __magic_name__ , 1_28 , __magic_name__ )
_a = {x.name for x in tmp_dir.iterdir()}
_a = {x.name for x in save_dir.iterdir()}
_a = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__magic_name__ ) < len(__magic_name__ )
assert len(__magic_name__ ) == 1
assert len(packed_examples[0] ) == sum(len(__magic_name__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __UpperCAmelCase ( self ) -> Optional[Any]:
if not FAIRSEQ_AVAILABLE:
return
_a , _a , _a = self._get_dataset(max_len=64 )
_a = 64
_a = ds.make_dynamic_sampler(__magic_name__ , required_batch_size_multiple=__magic_name__ )
_a = [len(__magic_name__ ) for x in batch_sampler]
assert len(set(__magic_name__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__magic_name__ ) == len(__magic_name__ ) # no dropped or added examples
_a = DataLoader(__magic_name__ , batch_sampler=__magic_name__ , collate_fn=ds.collate_fn , num_workers=2 )
        failures = []
        num_src_per_batch = []
for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch['input_ids'].shape )
num_src_per_batch.append(__magic_name__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__magic_name__ )
assert num_src_per_batch[0] == max(__magic_name__ )
if failures:
raise AssertionError(f'too many tokens in {len(__magic_name__ )} batches' )
def __UpperCAmelCase ( self ) -> Tuple:
_a , _a , _a = self._get_dataset(max_len=5_12 )
_a = 2
_a = ds.make_sortish_sampler(__magic_name__ , shuffle=__magic_name__ )
_a = DataLoader(__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn , num_workers=2 )
_a = DataLoader(__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=__magic_name__ )
_a = tokenizer.pad_token_id
def count_pad_tokens(__magic_name__ , __magic_name__="input_ids" ):
return [batch[k].eq(__magic_name__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__magic_name__ , k='labels' ) ) < sum(count_pad_tokens(__magic_name__ , k='labels' ) )
assert sum(count_pad_tokens(__magic_name__ ) ) < sum(count_pad_tokens(__magic_name__ ) )
assert len(__magic_name__ ) == len(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__=10_00 , __magic_name__=1_28 ) -> str:
if os.getenv('USE_REAL_DATA' , __magic_name__ ):
_a = 'examples/seq2seq/wmt_en_ro'
_a = max_len * 2 * 64
if not Path(__magic_name__ ).joinpath('train.len' ).exists():
save_len_file(__magic_name__ , __magic_name__ )
else:
_a = 'examples/seq2seq/test_data/wmt_en_ro'
_a = max_len * 4
save_len_file(__magic_name__ , __magic_name__ )
_a = AutoTokenizer.from_pretrained(__magic_name__ )
_a = SeqaSeqDataset(
__magic_name__ , data_dir=__magic_name__ , type_path='train' , max_source_length=__magic_name__ , max_target_length=__magic_name__ , n_obs=__magic_name__ , )
return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self ) -> int:
_a , _a , _a = self._get_dataset()
_a = set(DistributedSortishSampler(__magic_name__ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__magic_name__ ) )
_a = set(DistributedSortishSampler(__magic_name__ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__magic_name__ ) )
assert idsa.intersection(__magic_name__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCAmelCase ( self , __magic_name__ ) -> Union[str, Any]:
_a = AutoTokenizer.from_pretrained(__magic_name__ , use_fast=__magic_name__ )
if tok_name == MBART_TINY:
_a = SeqaSeqDataset(
__magic_name__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
_a = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_a = SeqaSeqDataset(
__magic_name__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
_a = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__magic_name__ ) == 1 if tok_name == BART_TINY else len(__magic_name__ ) == 0
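# Illustrative, standalone sketch of the "sortish" sampling idea the tests
# above exercise (simplified; not the class under test): shuffle indices in
# coarse chunks and sort each chunk by length, so batches contain sequences of
# similar length while the epoch order stays stochastic.
import random

def sortish_indices(lengths, chunk_size):
    indices = list(range(len(lengths)))
    random.shuffle(indices)
    chunks = [indices[i : i + chunk_size] for i in range(0, len(indices), chunk_size)]
    return [j for chunk in chunks for j in sorted(chunk, key=lambda k: -lengths[k])]

print(sortish_indices([3, 17, 9, 2, 11, 5], chunk_size=3))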
| 168 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution():
    # Project Euler 30: sum all numbers equal to the sum of the fifth powers of
    # their digits. 6 * 9**5 = 354294, so no candidate exceeds 1_000_000.
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number) )
if __name__ == "__main__":
    print(solution())
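# Illustrative check (not in the original): 4150 = 4**5 + 1**5 + 5**5 + 0**5,
# so the helper must map it to itself.
assert digits_fifth_powers_sum(4_150) == 4_150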
| 19 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = 'ResNetConfig'
# Base docstring
lowercase_ = 'microsoft/resnet-50'
lowercase_ = [1, 2_0_4_8, 7, 7]
# Image classification docstring
lowercase_ = 'microsoft/resnet-50'
lowercase_ = 'tiger cat'
lowercase_ = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __lowerCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 3 , lowerCAmelCase = 1 , lowerCAmelCase = "relu" ) -> List[Any]:
'''simple docstring'''
super().__init__()
_lowercase =nn.Convad(
lowerCAmelCase , lowerCAmelCase , kernel_size=lowerCAmelCase , stride=lowerCAmelCase , padding=kernel_size // 2 , bias=lowerCAmelCase )
_lowercase =nn.BatchNormad(lowerCAmelCase )
_lowercase =ACTaFN[activation] if activation is not None else nn.Identity()
def A__ ( self , lowerCAmelCase ) -> Tensor:
'''simple docstring'''
_lowercase =self.convolution(lowerCAmelCase )
_lowercase =self.normalization(lowerCAmelCase )
_lowercase =self.activation(lowerCAmelCase )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase ) -> List[str]:
'''simple docstring'''
super().__init__()
_lowercase =ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_lowercase =nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_lowercase =config.num_channels
def A__ ( self , lowerCAmelCase ) -> Tensor:
'''simple docstring'''
_lowercase =pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_lowercase =self.embedder(lowerCAmelCase )
_lowercase =self.pooler(lowerCAmelCase )
return embedding
class __lowerCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 2 ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase =nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , stride=lowerCAmelCase , bias=lowerCAmelCase )
_lowercase =nn.BatchNormad(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ) -> Tensor:
'''simple docstring'''
_lowercase =self.convolution(lowerCAmelCase )
_lowercase =self.normalization(lowerCAmelCase )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1 , lowerCAmelCase = "relu" ) -> List[Any]:
'''simple docstring'''
super().__init__()
_lowercase =in_channels != out_channels or stride != 1
_lowercase =(
ResNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
_lowercase =nn.Sequential(
ResNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase ) , ResNetConvLayer(lowerCAmelCase , lowerCAmelCase , activation=lowerCAmelCase ) , )
_lowercase =ACTaFN[activation]
def A__ ( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =hidden_state
_lowercase =self.layer(lowerCAmelCase )
_lowercase =self.shortcut(lowerCAmelCase )
hidden_state += residual
_lowercase =self.activation(lowerCAmelCase )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1 , lowerCAmelCase = "relu" , lowerCAmelCase = 4 ) -> List[Any]:
'''simple docstring'''
super().__init__()
_lowercase =in_channels != out_channels or stride != 1
_lowercase =out_channels // reduction
_lowercase =(
ResNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
_lowercase =nn.Sequential(
ResNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase ) , ResNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase ) , )
_lowercase =ACTaFN[activation]
def A__ ( self , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase =hidden_state
_lowercase =self.layer(lowerCAmelCase )
_lowercase =self.shortcut(lowerCAmelCase )
hidden_state += residual
_lowercase =self.activation(lowerCAmelCase )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 2 , lowerCAmelCase = 2 , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_lowercase =ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_lowercase =nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , activation=config.hidden_act ) , *[layer(lowerCAmelCase , lowerCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A__ ( self , lowerCAmelCase ) -> Tensor:
'''simple docstring'''
_lowercase =input
for layer in self.layers:
_lowercase =layer(lowerCAmelCase )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_lowercase =nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_lowercase =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , depth=lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
_lowercase =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowercase =hidden_states + (hidden_state,)
_lowercase =stage_module(lowerCAmelCase )
if output_hidden_states:
_lowercase =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase , hidden_states=lowerCAmelCase , )
class __lowerCAmelCase ( snake_case_ ):
_a = ResNetConfig
_a = """resnet"""
_a = """pixel_values"""
_a = True
def A__ ( self , lowerCAmelCase ) -> Tuple:
'''simple docstring'''
if isinstance(lowerCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A__ ( self , lowerCAmelCase , lowerCAmelCase=False ) -> Tuple:
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_lowercase =value
lowercase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , snake_case_ , )
class __lowerCAmelCase ( snake_case_ ):
def __init__( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
_lowercase =config
_lowercase =ResNetEmbeddings(lowerCAmelCase )
_lowercase =ResNetEncoder(lowerCAmelCase )
_lowercase =nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_lowercase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase =return_dict if return_dict is not None else self.config.use_return_dict
_lowercase =self.embedder(lowerCAmelCase )
_lowercase =self.encoder(
lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase )
_lowercase =encoder_outputs[0]
_lowercase =self.pooler(lowerCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase , pooler_output=lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , snake_case_ , )
class __lowerCAmelCase ( snake_case_ ):
def __init__( self , lowerCAmelCase ) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase )
_lowercase =config.num_labels
_lowercase =ResNetModel(lowerCAmelCase )
# classification head
_lowercase =nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A__ ( self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
_lowercase =return_dict if return_dict is not None else self.config.use_return_dict
_lowercase =self.resnet(lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase )
_lowercase =outputs.pooler_output if return_dict else outputs[1]
_lowercase =self.classifier(lowerCAmelCase )
_lowercase =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowercase ='regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowercase ='single_label_classification'
else:
_lowercase ='multi_label_classification'
if self.config.problem_type == "regression":
_lowercase =MSELoss()
if self.num_labels == 1:
_lowercase =loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowercase =loss_fct(lowerCAmelCase , lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
_lowercase =CrossEntropyLoss()
_lowercase =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowercase =BCEWithLogitsLoss()
_lowercase =loss_fct(lowerCAmelCase , lowerCAmelCase )
if not return_dict:
_lowercase =(logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase , logits=lowerCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n """ , snake_case_ , )
class __lowerCAmelCase ( snake_case_ , snake_case_ ):
def __init__( self , lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
super()._init_backbone(lowerCAmelCase )
_lowercase =[config.embedding_size] + config.hidden_sizes
_lowercase =ResNetEmbeddings(lowerCAmelCase )
_lowercase =ResNetEncoder(lowerCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase )
@replace_return_docstrings(output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ) -> BackboneOutput:
'''simple docstring'''
_lowercase =return_dict if return_dict is not None else self.config.use_return_dict
_lowercase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase =self.embedder(lowerCAmelCase )
_lowercase =self.encoder(lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase )
_lowercase =outputs.hidden_states
_lowercase =()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_lowercase =(feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase , )
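# Hedged usage sketch (illustrative, assuming the classes above resolve to
# transformers' ResNetConfig / ResNetModel, as the internal references suggest):
import torch
from transformers import ResNetConfig, ResNetModel

tiny_config = ResNetConfig(num_channels=3, embedding_size=16, hidden_sizes=[16, 32], depths=[1, 1])
tiny_model = ResNetModel(tiny_config).eval()
with torch.no_grad():
    outputs = tiny_model(torch.randn(1, 3, 64, 64))
print(outputs.last_hidden_state.shape)  # embedder + two stages downsample 64 -> 8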
| 205 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ = 1_9_2
lowerCamelCase_ = 7_6_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 3
lowerCamelCase_ = [8_0_0, 1_3_3_3]
lowerCamelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = 3_3_0
lowerCamelCase_ = 1_4
lowerCamelCase_ = 6
lowerCamelCase_ = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_5_3_6
lowerCamelCase_ = 1_2
lowerCamelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ = [8_0_0, 1_3_4_4]
lowerCamelCase_ = 9_1
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "coco-detection-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCamelCase__ ):
if "backbone" in name:
lowerCamelCase_ = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase_ = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase_ = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase_ = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase_ = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase_ = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
lowerCamelCase_ = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase_ = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCamelCase_ = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = outputs.logits, outputs.pred_boxes
lowerCamelCase_ , lowerCamelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowerCamelCase_ = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
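# Illustrative check of the key-renaming scheme above (hypothetical binding:
# `rename_key` stands in for the third helper defined in this file, which the
# obfuscation left without a usable name):
examples = {
    "backbone.cls_token": "vit.embeddings.cls_token",
    "backbone.blocks.0.attn.proj.weight": "vit.encoder.layer.0.attention.output.dense.weight",
}
for timm_key, expected in examples.items():
    assert rename_key(timm_key) == expected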
| 19 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir('fixtures')
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_A : List[Any] = mock.Mock()
_A : int = 5_0_0
_A : List[str] = {}
_A : Optional[int] = HTTPError
_A : Optional[Any] = {}
# Download this model to make sure it's in the cache.
_A : Dict = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__lowerCamelCase) as mock_head:
_A : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCamelCase ( self) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
_A : List[str] = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json")
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[Any]:
_A : Tuple = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase)
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token)
_A : List[str] = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCamelCase , repo_id="test-feature-extractor" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Tuple = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> List[Any]:
_A : Tuple = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase)
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token)
_A : Dict = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Any:
CustomFeatureExtractor.register_for_auto_class()
_A : int = CustomFeatureExtractor.from_pretrained(__lowerCamelCase)
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
_A : str = AutoFeatureExtractor.from_pretrained(
F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=__lowerCamelCase)
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor")
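# Standalone sketch of the offline-fallback pattern tested above (assumes the
# checkpoint was cached once; Wav2Vec2FeatureExtractor is the upstream name):
import unittest.mock as mock
from requests.exceptions import HTTPError
from transformers import Wav2Vec2FeatureExtractor

Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")  # warm the cache
offline_response = mock.Mock(status_code=500, headers={}, raise_for_status=mock.Mock(side_effect=HTTPError), json=mock.Mock(return_value={}))
with mock.patch("requests.Session.request", return_value=offline_response):
    # With HTTP failing, from_pretrained falls back to the local cache.
    extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")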
| 11 |
def binomial_coefficient(n, r):
    # Compute C(n, r) by building a single row of Pascal's triangle:
    # O(n * r) time, O(r) space.
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # Update the row in place, right to left, so each entry still reads
        # the previous row's value.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
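# Cross-check against the standard library (illustrative addition):
import math
assert all(
    binomial_coefficient(n, r) == math.comb(n, r) for n in range(12) for r in range(n + 1)
)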
| 19 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class a__ ( snake_case_ , snake_case_ ):
@register_to_config
def __init__( self : List[Any] , a : Any = 1_28 , a : Tuple = 2_56 , a : List[str] = 20_00.0 , a : Optional[int] = 7_68 , a : List[str] = 12 , a : Dict = 12 , a : Optional[Any] = 64 , a : Union[str, Any] = 20_48 , a : Any = 0.1 , ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.Sequential(
nn.Linear(a , d_model * 4 , bias=a ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=a ) , nn.SiLU() , )
__lowerCamelCase = nn.Embedding(a , a )
__lowerCamelCase = False
__lowerCamelCase = nn.Linear(a , a , bias=a )
__lowerCamelCase = nn.Dropout(p=a )
__lowerCamelCase = nn.ModuleList()
for lyr_num in range(a ):
# FiLM conditional T5 decoder
__lowerCamelCase = DecoderLayer(d_model=a , d_kv=a , num_heads=a , d_ff=a , dropout_rate=a )
self.decoders.append(a )
__lowerCamelCase = TaLayerNorm(a )
__lowerCamelCase = nn.Dropout(p=a )
__lowerCamelCase = nn.Linear(a , a , bias=a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Optional[Any] , a : Any ):
"""simple docstring"""
__lowerCamelCase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Dict , a : Any , a : Tuple ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowerCamelCase = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__lowerCamelCase = self.conditioning_emb(a ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowerCamelCase = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowerCamelCase = torch.broadcast_to(
torch.arange(a , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__lowerCamelCase = self.position_encoding(a )
__lowerCamelCase = self.continuous_inputs_projection(a )
inputs += position_encodings
__lowerCamelCase = self.dropout(a )
# decoder: No padding present.
__lowerCamelCase = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__lowerCamelCase = [(x, self.encoder_decoder_mask(a , a )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowerCamelCase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__lowerCamelCase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__lowerCamelCase = lyr(
a , conditioning_emb=a , encoder_hidden_states=a , encoder_attention_mask=a , )[0]
__lowerCamelCase = self.decoder_norm(a )
__lowerCamelCase = self.post_dropout(a )
__lowerCamelCase = self.spec_out(a )
return spec_out
class a__ ( nn.Module ):
def __init__( self : Tuple , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : List[Any] , a : Tuple=1e-6 ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=a , d_kv=a , num_heads=a , dropout_rate=a ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=a , d_kv=a , num_heads=a , dropout_rate=a , layer_norm_epsilon=a , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=a , d_ff=a , dropout_rate=a , layer_norm_epsilon=a ) )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : int , a : Any=None , a : List[Any]=None , a : List[Any]=None , a : Union[str, Any]=None , a : Optional[int]=None , ):
"""simple docstring"""
__lowerCamelCase = self.layer[0](
a , conditioning_emb=a , attention_mask=a , )
if encoder_hidden_states is not None:
__lowerCamelCase = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
__lowerCamelCase = self.layer[1](
a , key_value_states=a , attention_mask=a , )
# Apply Film Conditional Feed Forward layer
__lowerCamelCase = self.layer[-1](a , a )
return (hidden_states,)
class a__ ( nn.Module ):
def __init__( self : str , a : List[Any] , a : Optional[Any] , a : Tuple , a : Optional[Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = TaLayerNorm(a )
__lowerCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=a )
__lowerCamelCase = Attention(query_dim=a , heads=a , dim_head=a , out_bias=a , scale_qk=a )
__lowerCamelCase = nn.Dropout(a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : List[str] , a : List[str]=None , a : Tuple=None , ):
"""simple docstring"""
__lowerCamelCase = self.layer_norm(a )
if conditioning_emb is not None:
__lowerCamelCase = self.FiLMLayer(a , a )
# Self-attention block
__lowerCamelCase = self.attention(a )
__lowerCamelCase = hidden_states + self.dropout(a )
return hidden_states
class a__ ( nn.Module ):
def __init__( self : int , a : str , a : str , a : Dict , a : Any , a : List[str] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = Attention(query_dim=a , heads=a , dim_head=a , out_bias=a , scale_qk=a )
__lowerCamelCase = TaLayerNorm(a , eps=a )
__lowerCamelCase = nn.Dropout(a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Union[str, Any] , a : Union[str, Any]=None , a : Union[str, Any]=None , ):
"""simple docstring"""
__lowerCamelCase = self.layer_norm(a )
__lowerCamelCase = self.attention(
a , encoder_hidden_states=a , attention_mask=attention_mask.squeeze(1 ) , )
__lowerCamelCase = hidden_states + self.dropout(a )
return layer_output
class a__ ( nn.Module ):
def __init__( self : str , a : List[str] , a : int , a : Tuple , a : Tuple ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = TaDenseGatedActDense(d_model=a , d_ff=a , dropout_rate=a )
__lowerCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=a )
__lowerCamelCase = TaLayerNorm(a , eps=a )
__lowerCamelCase = nn.Dropout(a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Any , a : List[str]=None ):
"""simple docstring"""
__lowerCamelCase = self.layer_norm(a )
if conditioning_emb is not None:
__lowerCamelCase = self.film(a , a )
__lowerCamelCase = self.DenseReluDense(a )
__lowerCamelCase = hidden_states + self.dropout(a )
return hidden_states
class a__ ( nn.Module ):
def __init__( self : Optional[Any] , a : int , a : List[Any] , a : Optional[Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.Linear(a , a , bias=a )
__lowerCamelCase = nn.Linear(a , a , bias=a )
__lowerCamelCase = nn.Linear(a , a , bias=a )
__lowerCamelCase = nn.Dropout(a )
__lowerCamelCase = NewGELUActivation()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.act(self.wi_a(a ) )
__lowerCamelCase = self.wi_a(a )
__lowerCamelCase = hidden_gelu * hidden_linear
__lowerCamelCase = self.dropout(a )
__lowerCamelCase = self.wo(a )
return hidden_states
class a__ ( nn.Module ):
def __init__( self : Union[str, Any] , a : Union[str, Any] , a : Optional[int]=1e-6 ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.Parameter(torch.ones(a ) )
__lowerCamelCase = eps
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=a )
__lowerCamelCase = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
__lowerCamelCase = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class a__ ( nn.Module ):
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Any ):
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(a , 3.0 )) ))
class a__ ( nn.Module ):
def __init__( self : Any , a : str , a : Dict ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.Linear(a , out_features * 2 , bias=a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Optional[Any] , a : str ):
"""simple docstring"""
__lowerCamelCase = self.scale_bias(a )
__lowerCamelCase , __lowerCamelCase = torch.chunk(a , 2 , -1 )
__lowerCamelCase = x * (1 + scale) + shift
return x
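# Minimal standalone illustration of the FiLM conditioning implemented by the
# last class above (hypothetical simplified version): a linear layer predicts a
# (scale, shift) pair per feature and modulates x as x * (1 + scale) + shift.
import torch
from torch import nn

class FiLM(nn.Module):
    def __init__(self, cond_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x, conditioning):
        scale, shift = torch.chunk(self.scale_bias(conditioning), 2, dim=-1)
        return x * (1 + scale) + shift

film = FiLM(cond_features=8, out_features=4)
print(film(torch.randn(2, 4), torch.randn(2, 8)).shape)  # torch.Size([2, 4])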
| 67 |
import math
def res(x, y):
    if 0 not in (x, y):
        # Compare powers on a log scale: log10(x**y) = y * log10(x).
        return y * math.logaa(x) if hasattr(math, "logaa") else y * math.log10(x)
    if x == 0:  # 0 raised to any positive power is 0
        return 0
    if y == 0:
        return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__":  # Main function
    # Read two base/power pairs from input and typecast them to int using map.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))
    # We find the log of each power using res(), which takes two arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
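# Worked example (illustrative): compare 2**100 with 10**30 without
# materializing either. log10(2**100) = 100 * log10(2) ~ 30.1 > 30, so 2**100 wins.
assert res(2, 100) > res(10, 30)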
| 19 | 0 |
def naive_pattern_search(s, pattern):
    '''simple docstring'''
    # Slide the pattern across the text and record every alignment where all
    # characters match: O(len(s) * len(pattern)) in the worst case.
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
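# Overlapping matches are reported as well (illustrative check):
assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]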
| 159 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
        lowerCamelCase_ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
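# Hedged usage sketch (not part of the file): upstream, these classes are
# exposed as SquadDataTrainingArguments and SquadDataset; the names and the
# data_dir below are illustrative assumptions, not values from this file.
from transformers import AutoTokenizer
from transformers.data.datasets import SquadDataset, SquadDataTrainingArguments

data_args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad_json_dir", max_seq_length=128)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
train_set = SquadDataset(data_args, tokenizer=tokenizer, mode="train")
print(len(train_set), train_set[0]["input_ids"].shape)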
| 19 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
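# Minimal sketch of the lazy-module pattern used above (illustrative, not the
# real _LazyModule): names are declared up front, but the heavy submodule is
# only imported the first time an attribute is actually accessed.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {n: mod for mod, names in import_structure.items() for n in names}

    def __getattr__(self, item):
        module = importlib.import_module("." + self._name_to_module[item], self.__name__)
        return getattr(module, item)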
| 57 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
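# Hedged sketch of a concrete subclass (hypothetical EnvCommand; upstream the
# abstract pair is named register_subcommand / run on BaseTransformersCLICommand):
from transformers.commands import BaseTransformersCLICommand

class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is the sub-parsers action produced by add_subparsers().
        env_parser = parser.add_parser("env", help="Print environment information.")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("environment info goes here")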
| 19 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : List[Any] =logging.get_logger(__name__)
lowerCAmelCase__ : Optional[Any] ={
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class UpperCAmelCase_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = '''distilbert'''
UpperCamelCase__ : Dict = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , _A=30_522 , _A=512 , _A=False , _A=6 , _A=12 , _A=768 , _A=4 * 768 , _A=0.1 , _A=0.1 , _A="gelu" , _A=0.0_2 , _A=0.1 , _A=0.2 , _A=0 , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = sinusoidal_pos_embds
__SCREAMING_SNAKE_CASE = n_layers
__SCREAMING_SNAKE_CASE = n_heads
__SCREAMING_SNAKE_CASE = dim
__SCREAMING_SNAKE_CASE = hidden_dim
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = qa_dropout
__SCREAMING_SNAKE_CASE = seq_classif_dropout
super().__init__(**_A , pad_token_id=_A )
class UpperCAmelCase_ ( snake_case_ ):
'''simple docstring'''
@property
def _A ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
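# Illustrative use of the attribute_map defined above (assuming the class is
# transformers' DistilBertConfig): `hidden_size` is an alias resolving to `dim`,
# and `num_hidden_layers` resolves to `n_layers`.
from transformers import DistilBertConfig

config = DistilBertConfig(dim=384, n_heads=6, n_layers=4)
assert config.hidden_size == config.dim == 384
assert config.num_hidden_layers == config.n_layers == 4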
| 257 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    """Text-based inpainting: a CLIPSeg model turns a text query into a
    segmentation mask, which is then handed to Stable Diffusion inpainting."""

    def __init__(
        self, segmentation_model, segmentation_processor, vae, text_encoder,
        tokenizer, unet, scheduler, safety_checker, feature_extractor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure"
                " to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly, as not setting `skip_prk_steps` in the config might lead"
                " to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging"
                " Face Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # Half the attention head size is usually a good trade-off between
            # speed and memory.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # `None` restores the full (unsliced) attention computation.
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self, prompt, image, text, height=512, width=512, num_inference_steps=50,
        guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0,
        generator=None, latents=None, output_type="pil", return_dict=True,
        callback=None, callback_steps=1, **kwargs,
    ):
        # Predict a segmentation mask for `text` with CLIPSeg.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run the inpainting pipeline with the generated mask.
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width,
            num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
            negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt,
            eta=eta, generator=generator, latents=latents, output_type=output_type,
            return_dict=return_dict, callback=callback, callback_steps=callback_steps,
        )
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCAmelCase_ ( snake_case_ ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = '''umt5'''
_lowerCamelCase: List[str] = ['''past_key_values''']
def __init__( self : Optional[Any] ,A_ : int=25_0112 ,A_ : List[str]=512 ,A_ : List[str]=64 ,A_ : int=1024 ,A_ : List[Any]=8 ,A_ : Union[str, Any]=None ,A_ : Any=6 ,A_ : Union[str, Any]=32 ,A_ : int=128 ,A_ : int=0.1 ,A_ : str=1e-6 ,A_ : int=1.0 ,A_ : int="gated-gelu" ,A_ : Optional[int]=True ,A_ : Any=True ,A_ : Optional[Any]="T5Tokenizer" ,A_ : str=True ,A_ : Any=0 ,A_ : Tuple=1 ,A_ : Dict=0 ,**A_ : Optional[Any] ,) -> List[Any]:
super().__init__(
is_encoder_decoder=A_ ,tokenizer_class=A_ ,tie_word_embeddings=A_ ,pad_token_id=A_ ,eos_token_id=A_ ,decoder_start_token_id=A_ ,**A_ ,)
A = vocab_size
A = d_model
A = d_kv
A = d_ff
A = num_layers
A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A = num_heads
A = relative_attention_num_buckets
A = relative_attention_max_distance
A = dropout_rate
A = layer_norm_epsilon
A = initializer_factor
A = feed_forward_proj
A = use_cache
A = self.feed_forward_proj.split('-' )
A = act_info[-1]
A = act_info[0] == 'gated'
if len(A_ ) > 1 and act_info[0] != "gated" or len(A_ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
A = 'gelu_new'
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return self.d_model
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.num_heads
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
return self.num_layers
class lowerCAmelCase_ ( snake_case_ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
A = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
A = 'past_encoder_sequence + sequence'
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(A_ ,direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
return 13
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 5e-4 | 74 |
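

# --- Usage sketch for UMT5Config above (an illustration, not part of the
# file). It assumes this module lives inside the transformers package, since
# the relative imports at the top do not resolve standalone; no weights are
# downloaded, only the default configuration is constructed.
if __name__ == "__main__":
    config = UMT5Config()
    # "gated-gelu" is split into a gated projection plus the "gelu_new" activation.
    print(config.is_gated_act, config.dense_act_fn)  # True gelu_new
    # T5-style names mapped onto the common config properties:
    print(config.hidden_size, config.num_attention_heads, config.num_hidden_layers)  # 512 6 8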
from collections import deque


def tarjan(g):
    """Tarjan's algorithm for finding strongly connected components.

    `g` is an adjacency list; the result is a list of components, each a list
    of vertex indices, emitted in reverse topological order.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
            elif on_stack[w]:
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]

        if lowlink_of[v] == index_of[v]:
            # `v` is the root of a strongly connected component; pop it off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    """Build an adjacency list for a directed graph on `n` vertices."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
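

# --- Extension sketch: condensation of the SCC result. The helper name
# `condense` is a hypothetical addition for illustration, not part of the
# original file. It maps each vertex to its component id and builds the DAG
# obtained by collapsing every strongly connected component to a single node.
def condense(graph, components):
    comp_of = {}
    for cid, comp in enumerate(components):
        for v in comp:
            comp_of[v] = cid
    dag = [set() for _ in components]
    for u, neighbours in enumerate(graph):
        for v in neighbours:
            if comp_of[u] != comp_of[v]:
                dag[comp_of[u]].add(comp_of[v])
    return dag


if __name__ == "__main__":
    # For the test graph above this prints [set(), {0}, {0, 1}, {2}]:
    # component 3 ({3, 2, 1, 0}) feeds 2 ({4}), which feeds 1 ({6}) and 0 ({5}).
    print(condense(g, tarjan(g)))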