# Project Euler problem 72: count the reduced proper fractions n/d with
# d <= limit, i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit.
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi[n] starts at n and is multiplied by (1 - 1/p) for every prime p dividing n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
# Accelerate integration test: checks that metrics computed with
# Accelerator.gather_for_metrics() match a single-process baseline.
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed

os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
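# To actually exercise the distributed code paths, this script is meant to be
# started through the accelerate CLI, e.g. (path illustrative):
#   accelerate launch test_metrics.py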
# ElGamal key generation: writes {name}_pubkey.txt / {name}_privkey.txt.
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(min_primitive_root, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> has to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
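# Illustrative in-memory usage (a toy key size; real keys should be at least
# 2048 bits and take noticeably longer to generate):
#   public_key, private_key = generate_key(64)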
# Utilities for asserting that runtime dependencies satisfy pip-style
# version requirements.
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter's version
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
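# Illustrative usage (version strings arbitrary):
#   require_version("numpy")                              # presence-only check
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")  # multi-clause range
#   require_version("python>=3.8")                        # interpreter special case
#   require_version_core("datasets>=1.8.0")               # adds the core hint on failure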
# Fine-tunes a sequence classifier on the codeparrot/codecomplex dataset to
# predict the (7-class) algorithmic complexity label of a code snippet.
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Evaluates on the train set after each epoch so train accuracy gets logged too."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        # Freeze the encoder and only train the classification head.
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
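# Illustrative invocation (script file name hypothetical):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5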
# Equivalent resistance of resistor networks.
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Req = R1 + R2 + ... + Rn"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
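# Illustrative values: two 2-ohm resistors give 1.0 ohm in parallel
# (1 / (1/2 + 1/2)) and 4.0 ohms in series (2 + 2):
#   resistor_parallel([2.0, 2.0])  # 1.0
#   resistor_series([2.0, 2.0])    # 4.0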
# Tests for the TensorFlow LayoutLMv3 model.
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: x0 <= x1 and y0 <= y1 for every box
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are currently skipped across the board for this model.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly computes the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
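# The suite is normally collected by pytest, e.g. (path illustrative):
#   python -m pytest tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py -k "test_model"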
from ..utils import DummyObject, requires_backends


# Dummy placeholder that raises a helpful ImportError at instantiation time
# when the optional `keras_nlp` backend is not installed. (The original class
# name was stripped by obfuscation; TFGPT2Tokenizer is the keras_nlp-backed
# class this dummy most plausibly stands in for.)
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
# Utilities for the LUKE NER example: a tensor-padding helper and a data
# collator that pads labels, NER tags, and entity spans to the entity length.
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    # Treat all non-letter/number ASCII as punctuation, plus anything in the
    # Unicode "P" categories.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
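# Illustrative usage sketch (checkpoint and task are assumptions; any LUKE
# tokenizer whose output includes "entity_ids" works):
#   from transformers import LukeTokenizer
#   tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_span_classification")
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)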
# Tests for VivitImageProcessor on video inputs given as PIL images, NumPy
# arrays, and PyTorch tensors.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
# Flax safety checker for Stable Diffusion: flags images whose CLIP embeddings
# are too close to a fixed set of unsafe concept embeddings.
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        # NCHW -> NHWC, as expected by the Flax CLIP vision module
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
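# Illustrative loading sketch (checkpoint name from the original Stable
# Diffusion release; conversion from PyTorch weights via `from_pt=True` is an
# assumption about how the weights are stored):
#   safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
#       "CompVis/stable-diffusion-safety-checker", from_pt=True
#   )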
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = parent
def __magic_name__( self :List[Any] ) -> Tuple:
return {}
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
__SCREAMING_SNAKE_CASE : str = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MarkupLMFeatureExtractionTester(self )
@property
def __magic_name__( self :Any ) -> Optional[Any]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def __magic_name__( self :Optional[int] ) -> Any:
# Initialize feature_extractor
__SCREAMING_SNAKE_CASE : int = self.feature_extraction_class()
# Test not batched input
__SCREAMING_SNAKE_CASE : Tuple = get_html_strings()[0]
__SCREAMING_SNAKE_CASE : Dict = feature_extractor(lowerCAmelCase__ )
# fmt: off
__SCREAMING_SNAKE_CASE : str = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
__SCREAMING_SNAKE_CASE : List[str] = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , lowerCAmelCase__ )
self.assertEqual(encoding.xpaths , lowerCAmelCase__ )
# Test batched
__SCREAMING_SNAKE_CASE : Tuple = get_html_strings()
__SCREAMING_SNAKE_CASE : Dict = feature_extractor(lowerCAmelCase__ )
# fmt: off
__SCREAMING_SNAKE_CASE : int = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
__SCREAMING_SNAKE_CASE : str = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , lowerCAmelCase__ )
self.assertEqual(encoding.xpaths , lowerCAmelCase__ )
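# Minimal usage sketch outside the test harness (assumes `bs4` is installed; the
# printed values are illustrative and mirror the expectations asserted above):
#
#   from transformers import MarkupLMFeatureExtractor
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
#   print(encoding.nodes)   # [['Hello']]
#   print(encoding.xpaths)  # [['/html/body/h1']]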
| 9 | 1 |
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''keras_nlp''']
def __init__( self :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Dict:
requires_backends(self , ['''keras_nlp'''] )
| 9 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = TransfoXLTokenizer
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def __magic_name__( self :str ) -> Dict:
super().setUp()
__SCREAMING_SNAKE_CASE : List[str] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
__SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__( self :Any , **lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = '''<unk> UNwanted , running'''
__SCREAMING_SNAKE_CASE : List[str] = '''<unk> unwanted, running'''
return input_text, output_text
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(lowerCAmelCase__ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [0, 4, 8, 7] )
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __magic_name__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :str ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowerCAmelCase__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 9 | 1 |
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :list[int] ) -> None:
__SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = [0] * len_array
if len_array > 0:
__SCREAMING_SNAKE_CASE : List[Any] = array[0]
for i in range(1 , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = self.prefix_sum[i - 1] + array[i]
def __magic_name__( self :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :int ) -> bool:
__SCREAMING_SNAKE_CASE : Dict = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCAmelCase__ )
return False
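# Worked example, assuming the de-obfuscated names `PrefixSum`, `get_sum` and
# `contains_sum` for the class and methods above:
#
#   ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
#   ps.get_sum(0, 2)               # -> 6  (1 + 2 + 3)
#   ps.get_sum(1, 3)               # -> 9  (prefix[3] - prefix[0] = 10 - 1)
#   ps.contains_sum(5)             # -> True  (subarray [2, 3])
#   ps.contains_sum(100)           # -> False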
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__=False ):
if isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = len(set_a.intersection(lowercase__ ) )
if alternative_union:
__SCREAMING_SNAKE_CASE : int = len(lowercase__ ) + len(lowercase__ )
else:
__SCREAMING_SNAKE_CASE : int = len(set_a.union(lowercase__ ) )
return intersection / union
if isinstance(lowercase__ , (list, tuple) ) and isinstance(lowercase__ , (list, tuple) ):
__SCREAMING_SNAKE_CASE : Dict = [element for element in set_a if element in set_b]
if alternative_union:
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ ) + len(lowercase__ )
return len(lowercase__ ) / union
else:
__SCREAMING_SNAKE_CASE : Tuple = set_a + [element for element in set_b if element not in set_a]
return len(lowercase__ ) / len(lowercase__ )
return None
if __name__ == "__main__":
__lowerCAmelCase : List[Any] ={'a', 'b', 'c', 'd', 'e'}
__lowerCAmelCase : Optional[Any] ={'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
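    # Expected output above: |{c, d, e}| / |{a, ..., i}| = 3 / 8 = 0.375.
    # A list input exercises the ordered branch instead of the set branch:
    print(jaccard_similarity(['a', 'b', 'c'], ['b', 'c', 'd']))  # 2 / 4 = 0.5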
| 9 | 1 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Optional[int]=56 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :int=99 , lowerCAmelCase__ :str=32 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Tuple=2 , lowerCAmelCase__ :Any=7 , lowerCAmelCase__ :Tuple="gelu_new" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[Any]=0.1 , lowerCAmelCase__ :Tuple=512 , lowerCAmelCase__ :Optional[Any]=16 , lowerCAmelCase__ :Optional[Any]=2 , lowerCAmelCase__ :Optional[int]=0.02 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :Any="block_sparse" , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :str=False , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Tuple=3 , ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = parent
__SCREAMING_SNAKE_CASE : List[Any] = batch_size
__SCREAMING_SNAKE_CASE : Any = seq_length
__SCREAMING_SNAKE_CASE : List[Any] = is_training
__SCREAMING_SNAKE_CASE : Tuple = use_attention_mask
__SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
__SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = num_choices
__SCREAMING_SNAKE_CASE : Optional[int] = rescale_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = attention_type
__SCREAMING_SNAKE_CASE : Optional[int] = use_bias
__SCREAMING_SNAKE_CASE : Any = block_size
__SCREAMING_SNAKE_CASE : Dict = num_random_blocks
def __magic_name__( self :Tuple ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Any = None
if self.use_attention_mask:
__SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : Tuple = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Any = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = config_and_inputs
__SCREAMING_SNAKE_CASE : str = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : str = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__( self :int ) -> Optional[Any]:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__( self :int ) -> int:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__( self :Dict ) -> str:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__( self :Optional[int] ) -> Dict:
super().test_hidden_states_output()
@slow
def __magic_name__( self :Optional[Any] ) -> Tuple:
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __magic_name__( self :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model_class(lowerCAmelCase__ )
@jax.jit
def model_jitted(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int]=None , **lowerCAmelCase__ :Union[str, Any] ):
return model(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE : Any = model_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __magic_name__( self :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str]=1E-5 , lowerCAmelCase__ :Optional[int]="outputs" , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
| 9 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__lowerCAmelCase : Optional[int] ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__lowerCAmelCase : Optional[Any] ='\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__lowerCAmelCase : Dict ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return float((preds == labels).mean() )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = simple_accuracy(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(lowercase__ )
__SCREAMING_SNAKE_CASE : str = np.array(lowercase__ )
__SCREAMING_SNAKE_CASE : str = en_sentvecs.shape[0]
# mean centering
__SCREAMING_SNAKE_CASE : Tuple = en_sentvecs - np.mean(lowercase__ , axis=0 )
__SCREAMING_SNAKE_CASE : Optional[int] = in_sentvecs - np.mean(lowercase__ , axis=0 )
__SCREAMING_SNAKE_CASE : str = cdist(lowercase__ , lowercase__ , '''cosine''' )
__SCREAMING_SNAKE_CASE : int = np.array(range(lowercase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = sim.argsort(axis=1 )[:, :10]
__SCREAMING_SNAKE_CASE : str = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
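# Toy sketch of the retrieval metric above (called `precision_at_aa` in this
# dump; it computes precision@10). Each row's true match is the same row index
# in the other matrix, so near-identical embeddings should score close to 1.0:
#
#   en = np.random.rand(50, 8)
#   ind = en + 0.001 * np.random.rand(50, 8)   # near-copies as "translations"
#   precision_at_aa(en, ind)                   # -> 1.0 (row i retrieves row i)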
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Tuple:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> str:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowerCAmelCase__ , lowerCAmelCase__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 1 |
def _UpperCamelCase ( lowercase__ ):
if num < 0:
return False
__SCREAMING_SNAKE_CASE : int = num
__SCREAMING_SNAKE_CASE : int = 0
while num > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
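    # Spot checks, assuming the function above is named `is_palindrome`
    # (its definition name is obfuscated in this dump):
    #   is_palindrome(121)   -> True   (digits reverse to 121)
    #   is_palindrome(123)   -> False  (digits reverse to 321)
    #   is_palindrome(-121)  -> False  (negative numbers are rejected up front)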
| 9 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = nn.functional.normalize(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = nn.functional.normalize(lowercase__ )
return torch.mm(lowercase__ , normalized_text_embeds.t() )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = CLIPConfig
SCREAMING_SNAKE_CASE__ : List[str] = ['''CLIPEncoderLayer''']
def __init__( self :str , lowerCAmelCase__ :CLIPConfig ) -> Tuple:
super().__init__(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = CLIPVisionModel(config.vision_config )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase__ )
@torch.no_grad()
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : int = self.vision_model(lowerCAmelCase__ )[1] # pooled_output
__SCREAMING_SNAKE_CASE : Optional[Any] = self.visual_projection(lowerCAmelCase__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : Optional[Any] = cosine_distance(lowerCAmelCase__ , self.special_care_embeds ).cpu().float().numpy()
__SCREAMING_SNAKE_CASE : Optional[Any] = cosine_distance(lowerCAmelCase__ , self.concept_embeds ).cpu().float().numpy()
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : List[Any] = image_embeds.shape[0]
for i in range(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__SCREAMING_SNAKE_CASE : List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
__SCREAMING_SNAKE_CASE : List[str] = special_cos_dist[i][concept_idx]
__SCREAMING_SNAKE_CASE : Any = self.special_care_embeds_weights[concept_idx].item()
__SCREAMING_SNAKE_CASE : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
__SCREAMING_SNAKE_CASE : int = cos_dist[i][concept_idx]
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.concept_embeds_weights[concept_idx].item()
__SCREAMING_SNAKE_CASE : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase__ )
result.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :torch.FloatTensor ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vision_model(lowerCAmelCase__ )[1] # pooled_output
__SCREAMING_SNAKE_CASE : List[str] = self.visual_projection(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = cosine_distance(lowerCAmelCase__ , self.special_care_embeds )
__SCREAMING_SNAKE_CASE : Optional[int] = cosine_distance(lowerCAmelCase__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__SCREAMING_SNAKE_CASE : List[Any] = 0.0
__SCREAMING_SNAKE_CASE : Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
__SCREAMING_SNAKE_CASE : List[str] = torch.any(special_scores > 0 , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = special_care * 0.01
__SCREAMING_SNAKE_CASE : int = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
__SCREAMING_SNAKE_CASE : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
__SCREAMING_SNAKE_CASE : Any = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 9 | 1 |
from __future__ import annotations
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = 0.00
__SCREAMING_SNAKE_CASE : List[str] = 0
for resistor in resistors:
if resistor <= 0:
__SCREAMING_SNAKE_CASE : Any = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = 0.00
__SCREAMING_SNAKE_CASE : int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__SCREAMING_SNAKE_CASE : Tuple = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
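    # Spot checks, assuming the helpers above are `resistor_parallel` and
    # `resistor_series` (definition names are obfuscated in this dump):
    #   resistor_parallel([2, 4, 4])  -> 1.0   (1 / (1/2 + 1/4 + 1/4))
    #   resistor_series([2, 4, 4])    -> 10.0  (2 + 4 + 4)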
| 9 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__lowerCAmelCase : List[Any] =datasets.load_iris()
__lowerCAmelCase : Tuple =np.array(data['data'])
__lowerCAmelCase : Dict =np.array(data['target'])
__lowerCAmelCase : List[str] =data['target_names']
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : str =train_test_split(X, y)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return np.linalg.norm(np.array(lowercase__ ) - np.array(lowercase__ ) )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
__SCREAMING_SNAKE_CASE : Optional[int] = zip(lowercase__ , lowercase__ )
# List of distances of all points from the point to be classified
__SCREAMING_SNAKE_CASE : Dict = []
for data_point in data:
__SCREAMING_SNAKE_CASE : Tuple = euclidean_distance(data_point[0] , lowercase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__SCREAMING_SNAKE_CASE : int = [i[1] for i in sorted(lowercase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__SCREAMING_SNAKE_CASE : Any = Counter(lowercase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
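    # The query above sits among small-petal samples, so the k=5 majority vote
    # should come back 'setosa'. A second illustrative query (typical virginica
    # measurements; the exact output depends on the random train/test split):
    print(classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3]))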
| 9 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : List[str] ={
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = '''convbert'''
def __init__( self :Dict , lowerCAmelCase__ :Tuple=30_522 , lowerCAmelCase__ :Optional[Any]=768 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :List[str]=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :List[Any]=0.1 , lowerCAmelCase__ :List[Any]=512 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Optional[int]=0.02 , lowerCAmelCase__ :List[Any]=1E-1_2 , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :Dict=0 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :Tuple=9 , lowerCAmelCase__ :Optional[int]=1 , lowerCAmelCase__ :List[Any]=None , **lowerCAmelCase__ :Union[str, Any] , ) -> int:
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
__SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
__SCREAMING_SNAKE_CASE : int = initializer_range
__SCREAMING_SNAKE_CASE : Any = layer_norm_eps
__SCREAMING_SNAKE_CASE : str = embedding_size
__SCREAMING_SNAKE_CASE : List[str] = head_ratio
__SCREAMING_SNAKE_CASE : Optional[Any] = conv_kernel_size
__SCREAMING_SNAKE_CASE : int = num_groups
__SCREAMING_SNAKE_CASE : int = classifier_dropout
class _lowercase ( A__ ):
'''simple docstring'''
@property
def __magic_name__( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
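# Minimal usage sketch against the real `transformers` classes this dump mirrors
# (ConvBertConfig and its ONNX config); hyperparameter values are illustrative:
#
#   from transformers import ConvBertConfig
#
#   config = ConvBertConfig(hidden_size=256, num_attention_heads=4, head_ratio=2)
#   # head_ratio trades attention heads for the span-based convolution branch;
#   # conv_kernel_size (default 9) sets the width of that convolution.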
| 9 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def __magic_name__( *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :str ) -> Union[str, Any]:
pass
def _UpperCamelCase ( lowercase__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase : str =(
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __magic_name__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
'''document-question-answering''' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
__SCREAMING_SNAKE_CASE : str = '''What is the placebo?'''
__SCREAMING_SNAKE_CASE : str = [
{
'''image''': load_image(lowerCAmelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : int = '''How many cats are there?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
# No text is detected in this image, so layoutlmv2 should return an empty answer.
__SCREAMING_SNAKE_CASE : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
# We can optionally pass the words and bounding boxes directly
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Any = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : int = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Tuple = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : str = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : str = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Dict = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : List[str] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[int] = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
pass
| 9 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__lowerCAmelCase : Optional[int] =True
except (ImportError, AttributeError):
__lowerCAmelCase : Optional[Any] =object
def _UpperCamelCase ( *lowercase__ , **lowercase__ ):
pass
__lowerCAmelCase : Union[str, Any] =False
__lowerCAmelCase : Union[str, Any] =logging.get_logger('transformers-cli/serving')
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowercase__ , args.host , args.port , args.workers )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : dict
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str]
SCREAMING_SNAKE_CASE__ : Optional[List[int]]
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any
class _lowercase ( A__ ):
'''simple docstring'''
@staticmethod
def __magic_name__( lowerCAmelCase__ :ArgumentParser ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=lowerCAmelCase__ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=lowerCAmelCase__ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=lowerCAmelCase__ , default=8_888 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=lowerCAmelCase__ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=lowerCAmelCase__ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=lowerCAmelCase__ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=lowerCAmelCase__ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=lowerCAmelCase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=lowerCAmelCase__ )
def __init__( self :int , lowerCAmelCase__ :Pipeline , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> Any:
__SCREAMING_SNAKE_CASE : str = pipeline
__SCREAMING_SNAKE_CASE : Dict = host
__SCREAMING_SNAKE_CASE : List[Any] = port
__SCREAMING_SNAKE_CASE : Tuple = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(f'''Serving model over {host}:{port}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['''POST'''] , ),
] , timeout=600 , )
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def __magic_name__( self :Dict ) -> Any:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __magic_name__( self :str , lowerCAmelCase__ :str = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ :bool = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) ) -> Any:
try:
__SCREAMING_SNAKE_CASE : int = self._pipeline.tokenizer.tokenize(lowerCAmelCase__ )
if return_ids:
__SCREAMING_SNAKE_CASE : List[str] = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
return ServeTokenizeResult(tokens=lowerCAmelCase__ , tokens_ids=lowerCAmelCase__ )
else:
return ServeTokenizeResult(tokens=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(lowerCAmelCase__ )} )
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ :bool = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ :bool = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , ) -> List[Any]:
try:
__SCREAMING_SNAKE_CASE : int = self._pipeline.tokenizer.decode(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return ServeDeTokenizeResult(model='''''' , text=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(lowerCAmelCase__ )} )
async def __magic_name__( self :Dict , lowerCAmelCase__ :str=Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) ) -> Optional[int]:
# Check we don't have empty string
if len(lowerCAmelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__SCREAMING_SNAKE_CASE : str = self._pipeline(lowerCAmelCase__ )
return ServeForwardResult(output=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(500 , {'''error''': str(lowerCAmelCase__ )} )
| 9 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Any ={'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
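# The `_LazyModule` indirection above keeps `import transformers` cheap: torch
# and the modeling file are only imported when an attribute is first accessed.
# Illustrative effect (assumes torch is installed and that the public checkpoint
# name below is correct):
#
#   from transformers import ViTMSNForImageClassification  # triggers the lazy load
#   model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")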
| 9 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _UpperCamelCase ( lowercase__ , lowercase__=False ):
try:
__SCREAMING_SNAKE_CASE : str = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__SCREAMING_SNAKE_CASE : Any = default
else:
# KEY is set, convert it to True or False.
try:
__SCREAMING_SNAKE_CASE : List[Any] = strtobool(lowercase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
__lowerCAmelCase : Dict =parse_flag_from_env('RUN_SLOW', default=False)
def _UpperCamelCase ( lowercase__ ):
return unittest.skip('''Test was skipped''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowercase__ )
def _UpperCamelCase ( lowercase__=None , lowercase__=None ):
if test_case is None:
return partial(lowercase__ , version=lowercase__ )
return unittest.skipUnless(is_torch_version('''>=''' , lowercase__ ) , F'''test requires torch version >= {version}''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowercase__ )
__lowerCAmelCase : Optional[Any] =(
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _UpperCamelCase ( lowercase__ ):
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowercase__ )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = True
@classmethod
def __magic_name__( cls :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
@classmethod
def __magic_name__( cls :List[Any] ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __magic_name__( self :List[Any] ) -> List[str]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowerCAmelCase__ )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Any:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :str , lowerCAmelCase__ :Union[mock.Mock, List[mock.Mock]] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[str] = mocks if isinstance(lowerCAmelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : int = AcceleratorState()
__SCREAMING_SNAKE_CASE : Optional[int] = tensor[None].clone().to(state.device )
__SCREAMING_SNAKE_CASE : List[str] = gather(lowercase__ ).cpu()
__SCREAMING_SNAKE_CASE : Union[str, Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowercase__ ):
return False
return True
class _lowercase :
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : List[str] = returncode
__SCREAMING_SNAKE_CASE : Optional[int] = stdout
__SCREAMING_SNAKE_CASE : Dict = stderr
async def _read_stream(stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        # decode one captured line, store it, and optionally mirror it live
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def _UpperCamelCase ( lowercase__ , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(lowercase__ , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(lowercase__ )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )
    return result
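# Usage sketch (hedged): assuming this wrapper corresponds to
# `execute_subprocess_async` in transformers.testing_utils, a passing run
# would look like:
#
#     result = _UpperCamelCase(['''python''', '''-c''', '''print("hello")'''] , quiet=True )
#     assert result.returncode == 0
#     assert result.stdout == ['''hello''']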
class _lowercase ( A__ ):
'''simple docstring'''
pass
def _UpperCamelCase ( lowercase__ , return_stdout=False ):
    try:
        output = subprocess.check_output(lowercase__ , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'''Command `{' '.join(lowercase__ )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
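# Usage sketch (hedged): the helper above wraps subprocess.check_output and
# converts failures into SubprocessCallException, e.g.:
#
#     sha = _UpperCamelCase(['''git''', '''rev-parse''', '''HEAD'''] , return_stdout=True )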
| 9 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowercase ( A__ ):
'''simple docstring'''
    def __init__( self :int , path_or_paths :NestedDataStructureLike[PathLike] , split :Optional[NamedSplit] = None , features :Optional[Features] = None , cache_dir :str = None , keep_in_memory :bool = False , streaming :bool = False , num_proc :Optional[int] = None , **kwargs :Optional[int] , ) -> Tuple:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self :Dict ) -> Tuple:
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
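# Usage sketch (hedged): this reader backs `load_dataset("text", ...)`.
# Assuming the class above is datasets' TextDatasetReader, a direct call
# would look like:
#
#     from datasets.io.text import TextDatasetReader
#     ds = TextDatasetReader('''corpus.txt''' , keep_in_memory=True ).read()
#     print(ds[0]['''text'''] )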
| 9 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :int , *lowerCAmelCase__ :str , **lowerCAmelCase__ :Any ) -> None:
        warnings.warn(
            '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DonutImageProcessor instead.''' , FutureWarning , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
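# The class above follows the standard deprecation-shim pattern: subclass the
# replacement, warn once at construction, then forward all arguments. A
# self-contained sketch of the same pattern with hypothetical names:
import warnings as _warnings

class _NewProcessor:
    def __init__(self , *args , **kwargs ):
        pass

class _DeprecatedExtractor(_NewProcessor ):
    def __init__(self , *args , **kwargs ):
        _warnings.warn(
            '''_DeprecatedExtractor is deprecated. Please use _NewProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )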
| 9 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set the fused qkv bias in the state dict (k bias is implicitly zero)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
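# Worked example (sketch) of the qkv-bias assembly above: the checkpoint only
# stores q and v biases; the k bias is implicitly zero, so the fused bias is
# the concatenation [q_bias, zeros, v_bias] along dim 0.
_q_demo = torch.tensor([1.0, 2.0] )
_v_demo = torch.tensor([3.0, 4.0] )
_qkv_demo = torch.cat((_q_demo, torch.zeros_like(_v_demo , requires_grad=False ), _v_demo) )
assert _qkv_demo.tolist() == [1.0, 2.0, 0.0, 0.0, 3.0, 4.0]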
def get_blipa_config(model_name , eos_token_id=None ):
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
__SCREAMING_SNAKE_CASE : Any = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__SCREAMING_SNAKE_CASE : str = tokenizer('''\n''' , add_special_tokens=lowercase__ ).input_ids[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = get_blipa_config(lowercase__ , eos_token_id=lowercase__ )
__SCREAMING_SNAKE_CASE : int = BlipaForConditionalGeneration(lowercase__ ).eval()
__SCREAMING_SNAKE_CASE : int = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__SCREAMING_SNAKE_CASE : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = load_model_and_preprocess(
name=lowercase__ , model_type=lowercase__ , is_eval=lowercase__ , device=lowercase__ )
original_model.eval()
print('''Done!''' )
# update state dict keys
__SCREAMING_SNAKE_CASE : List[str] = original_model.state_dict()
__SCREAMING_SNAKE_CASE : Optional[int] = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(lowercase__ )
if key.startswith('''Qformer.bert''' ):
__SCREAMING_SNAKE_CASE : List[str] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE : Dict = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__SCREAMING_SNAKE_CASE : List[str] = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__SCREAMING_SNAKE_CASE : Tuple = key.replace('''t5''' , '''language''' )
__SCREAMING_SNAKE_CASE : Tuple = val
# read in qv biases
read_in_q_v_bias(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = hf_model.load_state_dict(lowercase__ , strict=lowercase__ )
assert len(lowercase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE : List[str] = load_demo_image()
__SCREAMING_SNAKE_CASE : Any = vis_processors['''eval'''](lowercase__ ).unsqueeze(0 ).to(lowercase__ )
__SCREAMING_SNAKE_CASE : str = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(lowercase__ )
# create processor
__SCREAMING_SNAKE_CASE : List[Any] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=lowercase__ , image_std=lowercase__ )
__SCREAMING_SNAKE_CASE : int = BlipaProcessor(image_processor=lowercase__ , tokenizer=lowercase__ )
__SCREAMING_SNAKE_CASE : Any = processor(images=lowercase__ , return_tensors='''pt''' ).pixel_values.to(lowercase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase__ , lowercase__ )
original_model.to(lowercase__ )
hf_model.to(lowercase__ )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE : Dict = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__SCREAMING_SNAKE_CASE : Dict = hf_model(lowercase__ , lowercase__ ).logits
else:
__SCREAMING_SNAKE_CASE : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__SCREAMING_SNAKE_CASE : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__SCREAMING_SNAKE_CASE : Optional[int] = hf_model(lowercase__ , lowercase__ , labels=lowercase__ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowercase__ )
assert torch.allclose(logits[0, :3, :3] , lowercase__ , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        __SCREAMING_SNAKE_CASE : Any = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowercase__ )
        assert torch.allclose(logits[0, :3, :3] , lowercase__ , atol=1e-4 )
else:
# cast to same type
__SCREAMING_SNAKE_CASE : Optional[Any] = logits.dtype
assert torch.allclose(original_logits.to(lowercase__ ) , lowercase__ , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__SCREAMING_SNAKE_CASE : Any = ''''''
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowercase__ , return_tensors='''pt''' ).input_ids.to(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = original_model.generate({'''image''': original_pixel_values} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = hf_model.generate(
lowercase__ , lowercase__ , do_sample=lowercase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.shape[1]
__SCREAMING_SNAKE_CASE : Any = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = [text.strip() for text in output_text]
print('''HF generation:''' , lowercase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
__lowerCAmelCase : Tuple =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
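# Example invocation (sketch; the script filename and output path below are
# illustrative, not taken from the original):
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b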
| 9 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir =os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(4_2)
MARIAN_MODEL ='sshleifer/student_marian_en_ro_6_1'
MBART_TINY ='sshleifer/tiny-mbart'
@require_torch
class _lowercase ( A__ ):
'''simple docstring'''
    def run_seqaseq_quick( self :str , distributed :List[Any]=False , extra_args_str :Tuple=None , predict_with_generate :Any=True , do_train :Tuple=True , do_eval :Dict=True , do_predict :Tuple=True , ) -> Union[str, Any]:
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['''eval_bleu'''] , float )
            assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def __magic_name__( self :Optional[Any] ) -> Dict:
        self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def __magic_name__( self :str ) -> Tuple:
        self.run_seqaseq_quick(distributed=False )
    @require_torch_multi_gpu
    def __magic_name__( self :Tuple ) -> List[Any]:
        self.run_seqaseq_quick(distributed=True )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def __magic_name__( self :Dict ) -> Optional[Any]:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple''' )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def __magic_name__( self :Any ) -> List[Any]:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple --fp16''' )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def __magic_name__( self :int ) -> Any:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=False )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def __magic_name__( self :str ) -> Dict:
        self.run_seqaseq_quick(
            distributed=True , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=False )
    @require_apex
    @require_torch_gpu
    def __magic_name__( self :Dict ) -> List[Any]:
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
    @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
    @require_torch_multi_gpu
    def __magic_name__( self :int , experiment_id :str ) -> Tuple:
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
        }
        data = experiments[experiment_id]
        kwargs = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        log_info_string = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data['''extra_args_str'''] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data['''n_matches'''] )
    @slow
    def __magic_name__( self :Any ) -> List[Any]:
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def __magic_name__( self :Any ) -> Optional[Any]:
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim :str ) -> Tuple[int, float]:
            extra_args_str = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args_str , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to account for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            loss_orig , loss_bnb , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer( self :Optional[Any] , max_len :int , model_name :str , num_train_epochs :int , learning_rate :float = 3E-3 , optim :str = "adafactor" , distributed :bool = False , extra_args_str :str = None , eval_steps :int = 0 , predict_with_generate :bool = True , do_train :bool = True , do_eval :bool = True , do_predict :bool = True , n_gpus_to_use :int = None , ) -> List[Any]:
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        args_eval = f'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps )}
        '''.split()
        args_predict = '''
            --do_predict
        '''.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f'''--optim {optim}'''.split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs ):
                main()
        return output_dir
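# For reference (sketch): with n_gpus_to_use=2 the distributed branch of
# run_trainer above effectively launches something like:
#
#     python -m torch.distributed.run --nproc_per_node=2 --master_port=<port> \
#         <examples_dir>/pytorch/translation/run_translation.py <training args>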
| 9 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[str] ={
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
__lowerCAmelCase : Optional[int] ={
'gpt-neox-20b': 2_0_4_8,
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''input_ids''', '''attention_mask''']
    def __init__( self :int , vocab_file :Any=None , merges_file :Optional[Any]=None , tokenizer_file :List[Any]=None , unk_token :str="<|endoftext|>" , bos_token :str="<|endoftext|>" , eos_token :Dict="<|endoftext|>" , add_prefix_space :Union[str, Any]=False , **kwargs :List[str] , ) -> Any:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self :Union[str, Any] , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self :Optional[Any] , conversation :"Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
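# Worked example (sketch) of the truncation rule in
# _build_conversation_input_ids above: only the most recent model_max_length
# ids are kept.
_model_max_length_demo = 4
_ids_demo = [10, 11, 12, 13, 14, 15]
if len(_ids_demo ) > _model_max_length_demo:
    _ids_demo = _ids_demo[-_model_max_length_demo :]
assert _ids_demo == [12, 13, 14, 15]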
| 9 | 1 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ConsistencyModelPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
SCREAMING_SNAKE_CASE__ : List[str] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def __magic_name__( self :List[str] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Dict = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def __magic_name__( self :Dict , lowerCAmelCase__ :List[str]=False ) -> Optional[int]:
if class_cond:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_cond_unet
else:
__SCREAMING_SNAKE_CASE : Dict = self.dummy_uncond_unet
# Default to CM multistep sampler
__SCREAMING_SNAKE_CASE : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE : Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def __magic_name__( self :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int]=0 ) -> Any:
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def __magic_name__( self :Any ) -> str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = ConsistencyModelPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : Dict = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Optional[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components(class_cond=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = ConsistencyModelPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
__SCREAMING_SNAKE_CASE : Tuple = pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : Dict = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = ConsistencyModelPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = 1
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : int = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :str ) -> str:
__SCREAMING_SNAKE_CASE : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components(class_cond=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = ConsistencyModelPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : int = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :Dict , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :int="cpu" , lowerCAmelCase__ :Optional[Any]=torch.floataa , lowerCAmelCase__ :Union[str, Any]=(1, 3, 64, 64) ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
__SCREAMING_SNAKE_CASE : str = self.get_fixed_latents(seed=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ , shape=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents
return inputs
def __magic_name__( self :Dict , lowerCAmelCase__ :Dict=0 , lowerCAmelCase__ :List[Any]="cpu" , lowerCAmelCase__ :str=torch.floataa , lowerCAmelCase__ :Union[str, Any]=(1, 3, 64, 64) ) -> Optional[Any]:
if type(lowerCAmelCase__ ) == str:
__SCREAMING_SNAKE_CASE : str = torch.device(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
return latents
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE : str = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE : List[str] = ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(torch_device=lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : Dict = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE : Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE : Optional[int] = ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(torch_device=lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = 1
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def __magic_name__( self :Union[str, Any] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE : Optional[Any] = ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(torch_device=lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = self.get_inputs(get_fixed_latents=lowerCAmelCase__ , device=lowerCAmelCase__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase__ , enable_math=lowerCAmelCase__ , enable_mem_efficient=lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE : Any = ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(torch_device=lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(get_fixed_latents=lowerCAmelCase__ , device=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 1
__SCREAMING_SNAKE_CASE : Any = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase__ , enable_math=lowerCAmelCase__ , enable_mem_efficient=lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[str] = pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : Any = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
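# Sketch of the fixed-latents determinism pattern used in get_fixed_latents
# above (plain torch.randn stands in for diffusers' randn_tensor here): seed
# a generator once, draw the latents, and pass them to the pipeline so
# repeated runs produce identical images.
_gen_demo = torch.Generator(device='''cpu''' ).manual_seed(0 )
_latents_demo = torch.randn((1, 3, 64, 64) , generator=_gen_demo )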
| 9 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__lowerCAmelCase : Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__lowerCAmelCase : Any ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__lowerCAmelCase : Optional[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[Any] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def __magic_name__( self :Any , predictions :Optional[int] , references :Any ) -> Optional[int]:
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
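# Usage example, taken from the metric's own docstring above:
#
#     import datasets
#     metric = datasets.load_metric('''competition_math''' )
#     results = metric.compute(references=['''\\frac{1}{2}'''] , predictions=['''1/2'''] )
#     assert results['''accuracy'''] == 1.0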
| 9 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : str ='▁'
__lowerCAmelCase : Any ={'vocab_file': 'spiece.model'}
__lowerCAmelCase : Any ={
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Optional[int] ={
'google/reformer-crime-and-punishment': 5_2_4_2_8_8,
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
    def __init__( self :List[str] , vocab_file :Tuple , eos_token :List[Any]="</s>" , unk_token :Dict="<unk>" , additional_special_tokens :int=[] , sp_model_kwargs :Optional[Dict[str, Any]] = None , **kwargs :Optional[Any] , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self :Any ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab( self :Union[str, Any] ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self :Union[str, Any] ) -> Dict[str, Any]:
        state = self.__dict__.copy()
        # the sentencepiece processor is not picklable; it is reloaded in __setstate__
        state['''sp_model'''] = None
        return state
    def __setstate__( self :Any , d :Dict[str, Any] ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self :Optional[Any] , text :str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self :str , token :str ) -> int:
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self :List[Any] , index :int ) -> str:
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self :Optional[Any] , tokens :Any ) -> str:
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self :Tuple , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
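# Usage sketch (hedged), assuming the class above is transformers'
# ReformerTokenizer:
#
#     from transformers import ReformerTokenizer
#     tok = ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
#     ids = tok('''Crime and Punishment''' ).input_ids
#     print(tok.convert_ids_to_tokens(ids ) )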
| 9 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int=13 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[Any]=99 , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :Any=5 , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :int=37 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Optional[Any]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Tuple=0.02 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :int=None , ) -> int:
__SCREAMING_SNAKE_CASE : Dict = parent
__SCREAMING_SNAKE_CASE : Any = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : Optional[Any] = is_training
__SCREAMING_SNAKE_CASE : int = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
__SCREAMING_SNAKE_CASE : Tuple = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
__SCREAMING_SNAKE_CASE : Union[str, Any] = scope
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[int] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , *lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , *lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = OpenAIGPTLMHeadModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , *lowerCAmelCase__ :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTDoubleHeadsModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , *lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = OpenAIGPTForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
__SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : str = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE__ : str = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Tuple:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __magic_name__( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :int=False ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__SCREAMING_SNAKE_CASE : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Tuple = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : Dict = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = OpenAIGPTModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 )
def __magic_name__( self :Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Any ) -> List[Any]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Dict = OpenAIGPTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=lowerCAmelCase__ ) # the president is
__SCREAMING_SNAKE_CASE : Dict = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__SCREAMING_SNAKE_CASE : Dict = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ )
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase__ )
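# --- Editor's example (hedged, not part of the original test file): the slow test
# above can assert an exact token list only because greedy decoding is deterministic.
# A minimal reproduction against the public `openai-gpt` checkpoint:
import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt').eval()
prompt = torch.tensor([[481, 4_735, 544]], dtype=torch.long)  # "the president is"
output_ids = model.generate(prompt, do_sample=False)  # greedy search
print(tokenizer.decode(output_ids[0]))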
| 9 | 1 |
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Dict ) -> Optional[Any]:
# the input arrives as a comma-separated string, so split it into a list of number strings
__SCREAMING_SNAKE_CASE : Optional[int] = arr.split(''',''' )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = [int(self.array[0] )] * len(self.array )
__SCREAMING_SNAKE_CASE : List[str] = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
__SCREAMING_SNAKE_CASE : Dict = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
__SCREAMING_SNAKE_CASE : int = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
__lowerCAmelCase : Any =input('please input some numbers:')
__lowerCAmelCase : Tuple =SubArray(whole_array)
__lowerCAmelCase : str =array.solve_sub_array()
print('the result is:', re)
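# --- Editor's example (hedged): the class above is Kadane's algorithm in disguise —
# `sum_value[i]` holds the best sum of a sub-array ending at i, `rear[i]` the best sum
# seen so far. Usage with the comma-separated-string constructor it expects:
demo = SubArray('1,-2,3,4,-5')
assert demo.solve_sub_array() == 7  # the slice [3, 4]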
| 9 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase ( *lowercase__ , lowercase__ = None , lowercase__=True , lowercase__=2 ):
from .. import __version__
__SCREAMING_SNAKE_CASE : Optional[Any] = take_from
__SCREAMING_SNAKE_CASE : List[str] = ()
if not isinstance(args[0] , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase__ ).base_version ) >= version.parse(lowercase__ ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if isinstance(lowercase__ , lowercase__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase__ ),)
__SCREAMING_SNAKE_CASE : List[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(lowercase__ , lowercase__ ):
values += (getattr(lowercase__ , lowercase__ ),)
__SCREAMING_SNAKE_CASE : List[str] = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__SCREAMING_SNAKE_CASE : str = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__SCREAMING_SNAKE_CASE : Any = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , FutureWarning , stacklevel=lowercase__ )
if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
__SCREAMING_SNAKE_CASE : Dict = call_frame.filename
__SCREAMING_SNAKE_CASE : Optional[Any] = call_frame.lineno
__SCREAMING_SNAKE_CASE : int = call_frame.function
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(lowercase__ ) == 0:
return
elif len(lowercase__ ) == 1:
return values[0]
return values
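# --- Editor's example (hedged): assuming the helper above is exposed as `deprecate`,
# as it is in diffusers, a typical caller pops a deprecated kwarg, warns, and falls
# back to the new argument. `scale`/`new_scale` and the far-future version string are
# illustrative placeholders:
def rescale(new_scale=1.0, **kwargs):
    deprecated_scale = deprecate('scale', '99.0.0', 'Use `new_scale` instead.', take_from=kwargs)
    if deprecated_scale is not None:
        new_scale = deprecated_scale
    return new_scale

assert rescale(scale=2.0) == 2.0  # also emits a FutureWarning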
| 9 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : str ={
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''data2vec-vision'''
def __init__( self :List[Any] , lowerCAmelCase__ :Union[str, Any]=768 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :List[Any]=12 , lowerCAmelCase__ :Optional[Any]=3_072 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :Union[str, Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=224 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :Tuple=3 , lowerCAmelCase__ :str=False , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :int=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :List[Any]=[3, 5, 7, 11] , lowerCAmelCase__ :List[str]=[1, 2, 3, 6] , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Any=0.4 , lowerCAmelCase__ :Optional[int]=256 , lowerCAmelCase__ :Tuple=1 , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :str=255 , **lowerCAmelCase__ :Optional[Any] , ) -> Dict:
super().__init__(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : Dict = intermediate_size
__SCREAMING_SNAKE_CASE : str = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
__SCREAMING_SNAKE_CASE : str = image_size
__SCREAMING_SNAKE_CASE : str = patch_size
__SCREAMING_SNAKE_CASE : Tuple = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_mask_token
__SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[Any] = use_relative_position_bias
__SCREAMING_SNAKE_CASE : int = use_shared_relative_position_bias
__SCREAMING_SNAKE_CASE : int = layer_scale_init_value
__SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
__SCREAMING_SNAKE_CASE : Any = use_mean_pooling
# decode head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE : List[Any] = out_indices
__SCREAMING_SNAKE_CASE : str = pool_scales
# auxiliary head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE : List[str] = use_auxiliary_head
__SCREAMING_SNAKE_CASE : List[Any] = auxiliary_loss_weight
__SCREAMING_SNAKE_CASE : Tuple = auxiliary_channels
__SCREAMING_SNAKE_CASE : str = auxiliary_num_convs
__SCREAMING_SNAKE_CASE : str = auxiliary_concat_input
__SCREAMING_SNAKE_CASE : Optional[int] = semantic_loss_ignore_index
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = version.parse('''1.11''' )
@property
def __magic_name__( self :Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __magic_name__( self :List[str] ) -> float:
return 1E-4
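# --- Editor's example (hedged): the ONNX config above declares a single 4-D
# `pixel_values` input and a 1e-4 validation tolerance. Building the model config
# with its documented defaults:
from transformers import Data2VecVisionConfig

cfg = Data2VecVisionConfig()  # hidden_size=768, 12 layers, 224px images, patch 16
print(cfg.hidden_size, cfg.image_size, cfg.patch_size)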
| 9 |
from __future__ import annotations
import bisect
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = -1 ):
if hi < 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(lowercase__ )
while lo < hi:
__SCREAMING_SNAKE_CASE : Any = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE : Union[str, Any] = mid + 1
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = mid
return lo
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = -1 ):
if hi < 0:
__SCREAMING_SNAKE_CASE : List[Any] = len(lowercase__ )
while lo < hi:
__SCREAMING_SNAKE_CASE : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE : Any = mid + 1
else:
__SCREAMING_SNAKE_CASE : Optional[int] = mid
return lo
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = -1 ):
sorted_collection.insert(bisect_left(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = -1 ):
sorted_collection.insert(bisect_right(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : List[Any] = len(lowercase__ ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE : str = left + (right - left) // 2
__SCREAMING_SNAKE_CASE : List[str] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE : int = midpoint - 1
else:
__SCREAMING_SNAKE_CASE : Dict = midpoint + 1
return None
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = bisect.bisect_left(lowercase__ , lowercase__ )
if index != len(lowercase__ ) and sorted_collection[index] == item:
return index
return None
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if right < left:
return None
__SCREAMING_SNAKE_CASE : int = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase__ , lowercase__ , lowercase__ , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase__ , lowercase__ , midpoint + 1 , lowercase__ )
if __name__ == "__main__":
__lowerCAmelCase : Dict =input('Enter numbers separated by comma:\n').strip()
__lowerCAmelCase : str =sorted(int(item) for item in user_input.split(','))
__lowerCAmelCase : Tuple =int(input('Enter a single number to be found in the list:\n'))
__lowerCAmelCase : Tuple =binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 9 | 1 |
from itertools import product
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = sides_number
__SCREAMING_SNAKE_CASE : Union[str, Any] = max_face_number * dice_number
__SCREAMING_SNAKE_CASE : List[str] = [0] * (max_total + 1)
__SCREAMING_SNAKE_CASE : List[str] = 1
__SCREAMING_SNAKE_CASE : str = range(lowercase__ , max_face_number + 1 )
for dice_numbers in product(lowercase__ , repeat=lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = sum(lowercase__ )
totals_frequencies[total] += 1
return totals_frequencies
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : List[str] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__SCREAMING_SNAKE_CASE : Tuple = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__SCREAMING_SNAKE_CASE : Tuple = 0
__SCREAMING_SNAKE_CASE : List[Any] = 9
__SCREAMING_SNAKE_CASE : List[Any] = 4 * 9
__SCREAMING_SNAKE_CASE : Dict = 6
for peter_total in range(lowercase__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__SCREAMING_SNAKE_CASE : str = (4**9) * (6**6)
__SCREAMING_SNAKE_CASE : str = peter_wins_count / total_games_number
__SCREAMING_SNAKE_CASE : int = round(lowercase__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = '''ylacombe/bark-small'''
__SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : str = '''en_speaker_1'''
__SCREAMING_SNAKE_CASE : Any = '''This is a test string'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings_path.json'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings'''
def __magic_name__( self :List[str] , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> int:
shutil.rmtree(self.tmpdirname )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__( self :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__SCREAMING_SNAKE_CASE : str = 35
__SCREAMING_SNAKE_CASE : str = 2
__SCREAMING_SNAKE_CASE : List[Any] = 8
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''semantic_prompt''': np.ones(lowerCAmelCase__ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__( self :Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
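# --- Editor's example (hedged): the voice-preset contract exercised above — a preset
# is a dict of three numpy prompt arrays (or a path to an .npz file holding them).
# Shapes follow the test constants: seq_len=35, 2 coarse and 8 total codebooks.
import numpy as np
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained('ylacombe/bark-small')
preset = {
    'semantic_prompt': np.ones(35),
    'coarse_prompt': np.ones((2, 35)),
    'fine_prompt': np.ones((8, 35)),
}
inputs = processor(text='This is a test string', voice_preset=preset)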
| 9 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : List[str] =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowerCAmelCase : str =2_5_0_0_0_4
__lowerCAmelCase : Any =2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = MBartTokenizer
SCREAMING_SNAKE_CASE__ : str = MBartTokenizerFast
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[int] = True
def __magic_name__( self :List[str] ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE : Any = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __magic_name__( self :Dict ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__SCREAMING_SNAKE_CASE : str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__SCREAMING_SNAKE_CASE : Dict = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE : str = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
__SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
__SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE : Dict = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''facebook/mbart-large-en-ro'''
SCREAMING_SNAKE_CASE__ : Optional[int] = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
SCREAMING_SNAKE_CASE__ : Any = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
SCREAMING_SNAKE_CASE__ : Tuple = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __magic_name__( cls :str ) -> int:
__SCREAMING_SNAKE_CASE : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
__SCREAMING_SNAKE_CASE : List[Any] = 1
return cls
def __magic_name__( self :Optional[Any] ) -> Dict:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_XX'''] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020 )
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Tuple:
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
__SCREAMING_SNAKE_CASE : int = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = 10
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> List[str]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250_026, 250_001] )
def __magic_name__( self :List[str] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def __magic_name__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
__SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__SCREAMING_SNAKE_CASE : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __magic_name__( self :Optional[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = targets['''input_ids''']
__SCREAMING_SNAKE_CASE : Any = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__( self :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : Dict = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3_034, 2, 250_004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250_001,
} , )
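# --- Editor's example (hedged): the language plumbing the integration tests above
# exercise — MBart appends [eos, lang_code] to both sides, so source ids end with
# en_XX (250_004) and labels with ro_RO (250_020):
from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained('facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO')
batch = tok('A test', text_target='Un test', return_tensors='pt')
assert batch['input_ids'][0][-1].item() == 250_004  # EN_CODE
assert batch['labels'][0][-1].item() == 250_020     # RO_CODE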
| 9 |
from importlib import import_module
from .logging import get_logger
__lowerCAmelCase : str =get_logger(__name__)
class _lowercase :
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str=None ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
def __init__( self :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict=None ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = obj
__SCREAMING_SNAKE_CASE : str = target
__SCREAMING_SNAKE_CASE : Dict = new
__SCREAMING_SNAKE_CASE : Union[str, Any] = target.split('''.''' )[0]
__SCREAMING_SNAKE_CASE : List[str] = {}
__SCREAMING_SNAKE_CASE : Tuple = attrs or []
def __enter__( self :int ) -> Dict:
*submodules, target_attr = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase__ ) ):
try:
__SCREAMING_SNAKE_CASE : Any = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.obj , lowerCAmelCase__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__SCREAMING_SNAKE_CASE : int = obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) )
__SCREAMING_SNAKE_CASE : List[str] = getattr(self.obj , lowerCAmelCase__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) )
__SCREAMING_SNAKE_CASE : Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
# finally set the target attribute
setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(import_module('''.'''.join(lowerCAmelCase__ ) ) , lowerCAmelCase__ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCAmelCase__ ) is attr_value:
__SCREAMING_SNAKE_CASE : Any = getattr(self.obj , lowerCAmelCase__ )
setattr(self.obj , lowerCAmelCase__ , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
__SCREAMING_SNAKE_CASE : Union[str, Any] = globals()['''__builtins__'''][target_attr]
setattr(self.obj , lowerCAmelCase__ , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self :str , *lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
for attr in list(self.original ):
setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) )
def __magic_name__( self :List[Any] ) -> List[Any]:
self.__enter__()
self._active_patches.append(self )
def __magic_name__( self :Optional[int] ) -> int:
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
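# --- Editor's example (hedged): in `datasets` the context manager above is exposed
# as `patch_submodule(obj, target, new)`. A self-contained sketch using a throwaway
# module object standing in for a real module that did `import os`:
import os
import types

demo_mod = types.ModuleType('demo_mod')
demo_mod.os = os

def _fake_join(*parts):
    return '::'.join(parts)

with patch_submodule(demo_mod, 'os.path.join', _fake_join):
    assert demo_mod.os.path.join('a', 'b') == 'a::b'  # patched inside the block
assert demo_mod.os.path.join('a', 'b') == os.path.join('a', 'b')  # restored on exit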
| 9 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = XCLIPTextConfig()
# derive patch size from model name
__SCREAMING_SNAKE_CASE : Optional[int] = model_name.find('''patch''' )
__SCREAMING_SNAKE_CASE : Dict = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPVisionConfig(patch_size=lowercase__ , num_frames=lowercase__ )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : int = 768
__SCREAMING_SNAKE_CASE : Dict = 3072
__SCREAMING_SNAKE_CASE : List[Any] = 12
__SCREAMING_SNAKE_CASE : Any = 1024
__SCREAMING_SNAKE_CASE : Union[str, Any] = 4096
__SCREAMING_SNAKE_CASE : Union[str, Any] = 16
__SCREAMING_SNAKE_CASE : List[Any] = 24
__SCREAMING_SNAKE_CASE : Union[str, Any] = 768
__SCREAMING_SNAKE_CASE : str = 3072
if model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Any = 336
__SCREAMING_SNAKE_CASE : int = XCLIPConfig.from_text_vision_configs(lowercase__ , lowercase__ )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Optional[int] = 768
return config
def _UpperCamelCase ( lowercase__ ):
# text encoder
if name == "token_embedding.weight":
__SCREAMING_SNAKE_CASE : Dict = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : int = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__SCREAMING_SNAKE_CASE : str = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__SCREAMING_SNAKE_CASE : Dict = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def _UpperCamelCase ( lowercase__ , lowercase__ ):
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : List[str] = orig_state_dict.pop(lowercase__ )
if "attn.in_proj" in key:
__SCREAMING_SNAKE_CASE : str = key.split('''.''' )
if key.startswith('''visual''' ):
__SCREAMING_SNAKE_CASE : str = key_split[3]
__SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__SCREAMING_SNAKE_CASE : int = val[:dim, :]
__SCREAMING_SNAKE_CASE : str = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : str = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : str = val[:dim]
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:]
else:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Any = val[:dim, :]
__SCREAMING_SNAKE_CASE : Optional[int] = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Optional[int] = val[:dim]
__SCREAMING_SNAKE_CASE : Optional[int] = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : Tuple = val[-dim:]
elif key.startswith('''mit''' ):
__SCREAMING_SNAKE_CASE : Dict = key_split[2]
__SCREAMING_SNAKE_CASE : Any = config.vision_config.mit_hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Dict = val[:dim, :]
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : List[Any] = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Dict = val[:dim]
__SCREAMING_SNAKE_CASE : Optional[Any] = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : List[str] = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2]
__SCREAMING_SNAKE_CASE : Optional[int] = config.text_config.hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Dict = val[:dim, :]
__SCREAMING_SNAKE_CASE : Optional[Any] = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim]
__SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : Any = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : List[str] = rename_key(lowercase__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__SCREAMING_SNAKE_CASE : Any = val.T
__SCREAMING_SNAKE_CASE : Dict = val
return orig_state_dict
def _UpperCamelCase ( lowercase__ ):
if num_frames == 8:
__SCREAMING_SNAKE_CASE : List[str] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
__SCREAMING_SNAKE_CASE : str = '''eating_spaghetti.npy'''
elif num_frames == 32:
__SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy'''
__SCREAMING_SNAKE_CASE : str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=lowercase__ , repo_type='''dataset''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.load(lowercase__ )
return list(lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__=None , lowercase__=False ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name]
__SCREAMING_SNAKE_CASE : int = 8
if "16-frames" in model_name:
__SCREAMING_SNAKE_CASE : Tuple = 16
elif "shot" in model_name:
__SCREAMING_SNAKE_CASE : List[Any] = 32
__SCREAMING_SNAKE_CASE : Any = get_xclip_config(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : str = XCLIPModel(lowercase__ )
model.eval()
if "drive" in checkpoint_url:
__SCREAMING_SNAKE_CASE : Optional[int] = '''pytorch_model.bin'''
gdown.cached_download(lowercase__ , lowercase__ , quiet=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.load(lowercase__ , map_location='''cpu''' )['''model''']
else:
__SCREAMING_SNAKE_CASE : List[str] = torch.hub.load_state_dict_from_url(lowercase__ )['''model''']
__SCREAMING_SNAKE_CASE : str = convert_state_dict(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = XCLIPModel(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
__SCREAMING_SNAKE_CASE : Dict = VideoMAEImageProcessor(size=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : int = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = XCLIPProcessor(image_processor=lowercase__ , tokenizer=lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = prepare_video(lowercase__ )
__SCREAMING_SNAKE_CASE : int = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=lowercase__ , return_tensors='''pt''' , padding=lowercase__ )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**lowercase__ )
# Verify outputs
__SCREAMING_SNAKE_CASE : Tuple = outputs.logits_per_video
__SCREAMING_SNAKE_CASE : Optional[int] = logits_per_video.softmax(dim=1 )
print('''Probs:''' , lowercase__ )
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Any = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : Any = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Any = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(lowercase__ , lowercase__ , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(lowercase__ , organization='''nielsr''' )
processor.push_to_hub(lowercase__ , organization='''nielsr''' )
slow_tokenizer.push_to_hub(lowercase__ , organization='''nielsr''' )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase : Optional[int] =parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
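# --- Editor's note (hedged): a typical invocation of the conversion script above;
# the script filename and output path are illustrative placeholders:
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub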
| 9 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] ='true'
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=16 ):
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = RegressionModel()
__SCREAMING_SNAKE_CASE : Optional[int] = deepcopy(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = RegressionDataset(length=lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def _UpperCamelCase ( lowercase__ , lowercase__=False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__SCREAMING_SNAKE_CASE : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE : Tuple = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase__ ):
if use_longest:
return tokenizer.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowercase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = get_dataloader(lowercase__ , not dispatch_batches )
__SCREAMING_SNAKE_CASE : List[str] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(lowercase__ , lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = []
for batch in dataloader:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = batch.values()
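        # forward pass without gradient tracking, then gather logits and targets across processes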
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Dict = model(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase__ )
targs.append(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=False , lowercase__=False , lowercase__=16 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = get_basic_setup(lowercase__ , lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = generate_predictions(lowercase__ , lowercase__ , lowercase__ )
assert (
len(lowercase__ ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}'''
def _UpperCamelCase ( lowercase__ = False , lowercase__ = False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_mrpc_setup(lowercase__ , lowercase__ )
# First do baseline
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = setup['''no''']
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : Dict = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ , references=batch['''labels'''] )
__SCREAMING_SNAKE_CASE : int = metric.compute()
# Then do distributed
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : int = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE : Any = batch['''labels''']
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ , references=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Dict = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowercase__ , lowercase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__SCREAMING_SNAKE_CASE : List[Any] = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowercase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__SCREAMING_SNAKE_CASE : Tuple = Accelerator()
test_torch_metrics(lowercase__ , 512 )
accelerator.state._reset_state()
def _UpperCamelCase ( lowercase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9 | 1 |
from PIL import Image
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = image.size
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Optional[Any] = image.load()
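    # first pass: accumulate the mean of all pixel values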
for i in range(lowercase__ ):
for j in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = pixels[j, i]
mean += pixel
mean //= width * height
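    # second pass: binarize each pixel against the mean threshold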
for j in range(lowercase__ ):
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
__lowerCAmelCase : Dict =mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 9 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
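# pip-style comparison operators mapped to their Python equivalents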
__lowerCAmelCase : Union[str, Any] ={
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
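    # parse both version strings and apply the requested comparison operator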
if not ops[op](version.parse(lowercase__ ) , version.parse(lowercase__ ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def _UpperCamelCase ( lowercase__ , lowercase__ = None ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''' , lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = requirement, None, None
else:
__SCREAMING_SNAKE_CASE : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowercase__ )
if not match:
raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
F''' got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = match[0]
__SCREAMING_SNAKE_CASE : Optional[int] = want_full.split(''',''' ) # there could be multiple requirements
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for w in want_range:
__SCREAMING_SNAKE_CASE : Any = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , lowercase__ )
if not match:
raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
F''' but got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = match[0]
__SCREAMING_SNAKE_CASE : List[Any] = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__SCREAMING_SNAKE_CASE : Optional[Any] = '''.'''.join([str(lowercase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return
# check if any version is installed
try:
__SCREAMING_SNAKE_CASE : Optional[int] = importlib.metadata.version(lowercase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowercase__ , lowercase__ )
| 9 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PegasusConfig
SCREAMING_SNAKE_CASE__ : Tuple = {}
SCREAMING_SNAKE_CASE__ : Tuple = '''gelu'''
def __init__( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Union[str, Any]=7 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :Tuple=99 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Union[str, Any]=4 , lowerCAmelCase__ :List[str]=37 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :str=40 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :str=0 , ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = parent
__SCREAMING_SNAKE_CASE : List[str] = batch_size
__SCREAMING_SNAKE_CASE : Dict = seq_length
__SCREAMING_SNAKE_CASE : int = is_training
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Dict = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Any = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = eos_token_id
__SCREAMING_SNAKE_CASE : List[str] = pad_token_id
__SCREAMING_SNAKE_CASE : Optional[int] = bos_token_id
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__SCREAMING_SNAKE_CASE : str = tf.concat([input_ids, eos_tensor] , axis=1 )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__SCREAMING_SNAKE_CASE : Tuple = prepare_pegasus_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def __magic_name__( self :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = TFPegasusModel(config=lowerCAmelCase__ ).get_decoder()
__SCREAMING_SNAKE_CASE : List[Any] = inputs_dict['''input_ids''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[:1, :]
__SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
__SCREAMING_SNAKE_CASE : str = inputs_dict['''head_mask''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# first forward pass
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and the attention mask
__SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
__SCREAMING_SNAKE_CASE : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__SCREAMING_SNAKE_CASE : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
__SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-3 )
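# assemble a full set of Pegasus inputs, deriving any attention or head masks that were not provided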
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ):
if attention_mask is None:
__SCREAMING_SNAKE_CASE : str = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : List[str] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[str] = TFPegasusModelTester(self )
__SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def __magic_name__( self :Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
SCREAMING_SNAKE_CASE__ : int = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''google/pegasus-xsum'''
@cached_property
def __magic_name__( self :Tuple ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__( self :List[str] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __magic_name__( self :Union[str, Any] , **lowerCAmelCase__ :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.translate_src_text(**lowerCAmelCase__ )
assert self.expected_text == generated_words
def __magic_name__( self :Any , **lowerCAmelCase__ :int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(self.src_text , **lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''tf''' )
__SCREAMING_SNAKE_CASE : str = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Any = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase__ )
return generated_words
@slow
def __magic_name__( self :Tuple ) -> int:
self._assert_generated_batch_equal_expected()
| 9 |
from __future__ import annotations
def _UpperCamelCase ( lowercase__ ):
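    # equivalent resistance of parallel resistors: 1 / (1/R1 + 1/R2 + ...)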
__SCREAMING_SNAKE_CASE : Dict = 0.00
__SCREAMING_SNAKE_CASE : List[str] = 0
for resistor in resistors:
if resistor <= 0:
__SCREAMING_SNAKE_CASE : Any = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def _UpperCamelCase ( lowercase__ ):
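    # equivalent resistance of series resistors: R1 + R2 + ...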
__SCREAMING_SNAKE_CASE : Tuple = 0.00
__SCREAMING_SNAKE_CASE : int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__SCREAMING_SNAKE_CASE : Tuple = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 1 |
def _UpperCamelCase ( lowercase__ = 10 , lowercase__ = 1000 , lowercase__ = True ):
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_val must be < max_val)''' )
return min_val if option else max_val
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return int((number_a + number_a) / 2 )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ )
), 'argument values must be type of "int"'
if lower > higher:
        raise ValueError('''argument value for lower must be less than higher (lower < higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(lowercase__ ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
__SCREAMING_SNAKE_CASE : Dict = lower
__SCREAMING_SNAKE_CASE : Union[str, Any] = higher
__SCREAMING_SNAKE_CASE : List[Any] = []
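    # binary search: guess the midpoint and narrow [lower, higher] after each high/low answer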
while True:
__SCREAMING_SNAKE_CASE : Dict = get_avg(lowercase__ , lowercase__ )
last_numbers.append(lowercase__ )
if answer(lowercase__ ) == "low":
__SCREAMING_SNAKE_CASE : Optional[Any] = number
elif answer(lowercase__ ) == "high":
__SCREAMING_SNAKE_CASE : Dict = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : List[str] = int(input('''Enter lower value : ''' ).strip() )
__SCREAMING_SNAKE_CASE : Dict = int(input('''Enter high value : ''' ).strip() )
__SCREAMING_SNAKE_CASE : List[Any] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(lowercase__ , lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 9 |
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''keras_nlp''']
def __init__( self :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Dict:
requires_backends(self , ['''keras_nlp'''] )
| 9 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['''input_features''', '''attention_mask''']
def __init__( self :int , lowerCAmelCase__ :List[Any]=80 , lowerCAmelCase__ :Union[str, Any]=16_000 , lowerCAmelCase__ :List[str]=80 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , **lowerCAmelCase__ :Tuple , ) -> List[str]:
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = num_mel_bins
__SCREAMING_SNAKE_CASE : List[str] = do_ceptral_normalize
__SCREAMING_SNAKE_CASE : Any = normalize_means
__SCREAMING_SNAKE_CASE : Dict = normalize_vars
__SCREAMING_SNAKE_CASE : Any = True
def __magic_name__( self :str , lowerCAmelCase__ :np.ndarray , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE : Any = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__SCREAMING_SNAKE_CASE : str = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ta_kaldi.fbank(lowerCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
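    # utterance-level cepstral mean and variance normalization (CMVN)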
@staticmethod
def __magic_name__( lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[bool] = True , lowerCAmelCase__ :Optional[bool] = True , lowerCAmelCase__ :float = 0.0 , ) -> np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
__SCREAMING_SNAKE_CASE : int = x[:input_length].mean(axis=0 )
__SCREAMING_SNAKE_CASE : List[str] = np.subtract(lowerCAmelCase__ , lowerCAmelCase__ )
if normalize_vars:
__SCREAMING_SNAKE_CASE : Optional[int] = x[:input_length].std(axis=0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.divide(lowerCAmelCase__ , lowerCAmelCase__ )
if input_length < x.shape[0]:
__SCREAMING_SNAKE_CASE : Tuple = padding_value
# make sure array is in float32
__SCREAMING_SNAKE_CASE : Union[str, Any] = x.astype(np.floataa )
return x
def __magic_name__( self :str , lowerCAmelCase__ :List[np.ndarray] , lowerCAmelCase__ :Optional[np.ndarray] = None ) -> List[np.ndarray]:
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCAmelCase__ , lowerCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
def __call__( self :int , lowerCAmelCase__ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , **lowerCAmelCase__ :int , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__SCREAMING_SNAKE_CASE : Tuple = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : List[str] = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE : List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
__SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : Optional[Any] = [raw_speech]
# extract fbank features
__SCREAMING_SNAKE_CASE : Dict = [self._extract_fbank_features(lowerCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
__SCREAMING_SNAKE_CASE : Tuple = BatchFeature({'''input_features''': features} )
__SCREAMING_SNAKE_CASE : Tuple = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
# make sure list is in array format
__SCREAMING_SNAKE_CASE : str = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_features]
__SCREAMING_SNAKE_CASE : Dict = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Tuple = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__SCREAMING_SNAKE_CASE : Tuple = (
np.array(lowerCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__SCREAMING_SNAKE_CASE : List[str] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=lowerCAmelCase__ )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : Dict = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
| 9 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=7 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :Optional[int]=18 , lowerCAmelCase__ :Dict=30 , lowerCAmelCase__ :Tuple=400 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[Any]=None , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Dict = size if size is not None else {'''shortest_edge''': 18}
__SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : List[Any] = batch_size
__SCREAMING_SNAKE_CASE : List[str] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_frames
__SCREAMING_SNAKE_CASE : Tuple = image_size
__SCREAMING_SNAKE_CASE : Optional[Any] = min_resolution
__SCREAMING_SNAKE_CASE : Any = max_resolution
__SCREAMING_SNAKE_CASE : List[Any] = do_resize
__SCREAMING_SNAKE_CASE : Optional[Any] = size
__SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
__SCREAMING_SNAKE_CASE : List[Any] = image_mean
__SCREAMING_SNAKE_CASE : List[str] = image_std
__SCREAMING_SNAKE_CASE : str = crop_size
def __magic_name__( self :Tuple ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VivitImageProcessor if is_vision_available() else None
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = VivitImageProcessingTester(self )
@property
def __magic_name__( self :int ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__SCREAMING_SNAKE_CASE : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :str ) -> int:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :Any ) -> List[str]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : Tuple = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 9 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
__lowerCAmelCase : str =parser.parse_args()
if args.model_type == "bert":
__lowerCAmelCase : str =BertForMaskedLM.from_pretrained(args.model_name)
__lowerCAmelCase : Tuple ='bert'
else:
raise ValueError('args.model_type should be "bert".')
__lowerCAmelCase : Tuple =model.state_dict()
__lowerCAmelCase : Dict ={}
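    # build the student state dict from the teacher's embeddings, selected layers and MLM head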
for w in ["word_embeddings", "position_embeddings"]:
__lowerCAmelCase : str =state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
__lowerCAmelCase : Union[str, Any] =state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
__lowerCAmelCase : Dict =0
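    # map teacher layers 0, 2, 4, 7, 9 and 11 onto consecutive student layers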
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
__lowerCAmelCase : Optional[Any] =state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
__lowerCAmelCase : Tuple =state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
__lowerCAmelCase : int =state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
__lowerCAmelCase : List[Any] =state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
__lowerCAmelCase : Optional[Any] =state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
__lowerCAmelCase : Union[str, Any] =state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
__lowerCAmelCase : Optional[int] =state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
__lowerCAmelCase : Dict =state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
__lowerCAmelCase : str =state_dict['cls.predictions.decoder.weight']
__lowerCAmelCase : str =state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCAmelCase : List[Any] =state_dict[f"""cls.predictions.transform.dense.{w}"""]
__lowerCAmelCase : int =state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 9 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = parent
def __magic_name__( self :List[Any] ) -> Tuple:
return {}
def _UpperCamelCase ( ):
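    # two small HTML documents used as fixtures for the feature extractor tests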
__SCREAMING_SNAKE_CASE : Optional[Any] = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
__SCREAMING_SNAKE_CASE : str = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MarkupLMFeatureExtractionTester(self )
@property
def __magic_name__( self :Any ) -> Optional[Any]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def __magic_name__( self :Optional[int] ) -> Any:
# Initialize feature_extractor
__SCREAMING_SNAKE_CASE : int = self.feature_extraction_class()
# Test not batched input
__SCREAMING_SNAKE_CASE : Tuple = get_html_strings()[0]
__SCREAMING_SNAKE_CASE : Dict = feature_extractor(lowerCAmelCase__ )
# fmt: off
__SCREAMING_SNAKE_CASE : str = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
__SCREAMING_SNAKE_CASE : List[str] = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , lowerCAmelCase__ )
self.assertEqual(encoding.xpaths , lowerCAmelCase__ )
# Test batched
__SCREAMING_SNAKE_CASE : Tuple = get_html_strings()
__SCREAMING_SNAKE_CASE : Dict = feature_extractor(lowerCAmelCase__ )
# fmt: off
__SCREAMING_SNAKE_CASE : int = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
__SCREAMING_SNAKE_CASE : str = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , lowerCAmelCase__ )
self.assertEqual(encoding.xpaths , lowerCAmelCase__ )
| 9 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__lowerCAmelCase : Optional[Any] ='sshleifer/mar_enro_6_3_student'
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Tuple:
super().setUp()
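        # download and unpack a small WMT en-ro dataset used by these tests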
__SCREAMING_SNAKE_CASE : Tuple = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[int] = f'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def __magic_name__( self :List[Any] ) -> List[str]:
MarianMTModel.from_pretrained(lowerCAmelCase__ )
@slow
@require_torch_gpu
def __magic_name__( self :Dict ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Any = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
__SCREAMING_SNAKE_CASE : List[Any] = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
__SCREAMING_SNAKE_CASE : Any = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
__SCREAMING_SNAKE_CASE : List[Any] = bash_script.replace(lowerCAmelCase__ , str(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Any = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__SCREAMING_SNAKE_CASE : Dict = f'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__SCREAMING_SNAKE_CASE : Optional[int] = ['''finetune.py'''] + bash_script.split() + args
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE : Any = pl.Trainer.add_argparse_args(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = SummarizationModule.add_model_specific_args(lowerCAmelCase__ , os.getcwd() )
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
__SCREAMING_SNAKE_CASE : str = main(lowerCAmelCase__ )
# Check metrics
__SCREAMING_SNAKE_CASE : List[str] = load_json(model.metrics_save_path )
__SCREAMING_SNAKE_CASE : int = metrics['''val'''][0]
__SCREAMING_SNAKE_CASE : Dict = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] , lowerCAmelCase__ )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__SCREAMING_SNAKE_CASE : Dict = os.listdir(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [x for x in contents if x.endswith('''.ckpt''' )][0]
__SCREAMING_SNAKE_CASE : int = os.path.join(args.output_dir , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__SCREAMING_SNAKE_CASE : Dict = {os.path.basename(lowerCAmelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class _lowercase ( A__ ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : str = f'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
__SCREAMING_SNAKE_CASE : int = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
__SCREAMING_SNAKE_CASE : Optional[int] = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
__SCREAMING_SNAKE_CASE : Tuple = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
__SCREAMING_SNAKE_CASE : Any = bash_script.replace(lowerCAmelCase__ , str(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_auto_remove_tmp_dir()
__SCREAMING_SNAKE_CASE : Any = bash_script.replace('''--fp16''' , '''''' )
__SCREAMING_SNAKE_CASE : Tuple = 6
__SCREAMING_SNAKE_CASE : str = (
['''distillation.py''']
+ bash_script.split()
+ [
f'''--output_dir={output_dir}''',
'''--gpus=1''',
'''--learning_rate=1e-3''',
f'''--num_train_epochs={epochs}''',
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE : str = pl.Trainer.add_argparse_args(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = SummarizationDistiller.add_model_specific_args(lowerCAmelCase__ , os.getcwd() )
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__SCREAMING_SNAKE_CASE : Any = distill_main(lowerCAmelCase__ )
# Check metrics
__SCREAMING_SNAKE_CASE : List[Any] = load_json(model.metrics_save_path )
__SCREAMING_SNAKE_CASE : List[str] = metrics['''val'''][0]
__SCREAMING_SNAKE_CASE : List[str] = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] , lowerCAmelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
__SCREAMING_SNAKE_CASE : List[str] = os.listdir(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = [x for x in contents if x.endswith('''.ckpt''' )][0]
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(args.output_dir , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE : int = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__SCREAMING_SNAKE_CASE : Any = {os.path.basename(lowerCAmelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 9 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = TransfoXLTokenizer
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def __magic_name__( self :str ) -> Dict:
super().setUp()
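        # minimal word-level vocabulary written to a temporary vocab file for the tests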
__SCREAMING_SNAKE_CASE : List[str] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
__SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__( self :Any , **lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = '''<unk> UNwanted , running'''
__SCREAMING_SNAKE_CASE : List[str] = '''<unk> unwanted, running'''
return input_text, output_text
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(lowerCAmelCase__ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [0, 4, 8, 7] )
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __magic_name__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :str ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowerCAmelCase__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 9 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[int] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
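# register backend-specific objects only when the corresponding dependency is installed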
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
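# Hedged illustration of what the lazy structure above buys: the lightweight
# config class resolves through `_LazyModule` without importing torch/tf/flax
# up front.
if __name__ == "__main__":
    import transformers
    cfg = transformers.RoFormerConfig()  # attribute access triggers the lazy import
    print(cfg.model_type)  # -> "roformer"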
| 9 |
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__=False ):
if isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = len(set_a.intersection(lowercase__ ) )
if alternative_union:
__SCREAMING_SNAKE_CASE : int = len(lowercase__ ) + len(lowercase__ )
else:
__SCREAMING_SNAKE_CASE : int = len(set_a.union(lowercase__ ) )
return intersection / union
if isinstance(lowercase__ , (list, tuple) ) and isinstance(lowercase__ , (list, tuple) ):
__SCREAMING_SNAKE_CASE : Dict = [element for element in set_a if element in set_b]
if alternative_union:
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ ) + len(lowercase__ )
return len(lowercase__ ) / union
else:
__SCREAMING_SNAKE_CASE : Tuple = set_a + [element for element in set_b if element not in set_a]
return len(lowercase__ ) / len(lowercase__ )
return len(lowercase__ ) / len(lowercase__ )
return None
if __name__ == "__main__":
__lowerCAmelCase : List[Any] ={'a', 'b', 'c', 'd', 'e'}
__lowerCAmelCase : Optional[Any] ={'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
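    # Worked cross-check of the value printed above: the sets share three
    # elements ({'c', 'd', 'e'}) and their union has eight, so the Jaccard
    # index is 3 / 8 = 0.375; the alternative denominator would give 3 / 11.
    sample_a = {'a', 'b', 'c', 'd', 'e'}
    sample_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    assert len(sample_a & sample_b) / len(sample_a | sample_b) == 3 / 8
    print(len(sample_a & sample_b) / (len(sample_a) + len(sample_b)))  # 0.2727...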
| 9 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 9 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__lowerCAmelCase : Optional[int] ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__lowerCAmelCase : Optional[Any] ='\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__lowerCAmelCase : Dict ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return float((preds == labels).mean() )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = simple_accuracy(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(lowercase__ )
__SCREAMING_SNAKE_CASE : str = np.array(lowercase__ )
__SCREAMING_SNAKE_CASE : str = en_sentvecs.shape[0]
# mean centering
__SCREAMING_SNAKE_CASE : Tuple = en_sentvecs - np.mean(lowercase__ , axis=0 )
__SCREAMING_SNAKE_CASE : Optional[int] = in_sentvecs - np.mean(lowercase__ , axis=0 )
__SCREAMING_SNAKE_CASE : str = cdist(lowercase__ , lowercase__ , '''cosine''' )
__SCREAMING_SNAKE_CASE : int = np.array(range(lowercase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = sim.argsort(axis=1 )[:, :10]
__SCREAMING_SNAKE_CASE : str = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Tuple:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> str:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowerCAmelCase__ , lowerCAmelCase__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowerCAmelCase : List[str] ={
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''facebook/nllb-200-distilled-600M'''
SCREAMING_SNAKE_CASE__ : List[str] = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
'''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''translator'''
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoModelForSeqaSeqLM
SCREAMING_SNAKE_CASE__ : List[Any] = LANGUAGE_CODES
SCREAMING_SNAKE_CASE__ : List[Any] = ['''text''', '''text''', '''text''']
SCREAMING_SNAKE_CASE__ : Dict = ['''text''']
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple ) -> Union[str, Any]:
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''' )
__SCREAMING_SNAKE_CASE : Dict = self.lang_to_code[src_lang]
__SCREAMING_SNAKE_CASE : str = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCAmelCase__ , return_tensors='''pt''' , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ )
def __magic_name__( self :Any , lowerCAmelCase__ :Optional[int] ) -> int:
return self.model.generate(**lowerCAmelCase__ )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase__ )
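# Hedged sketch of what the tool above does end-to-end, written with the real
# (un-mangled) class names; assumptions: network access, and that downloading
# the ~2.5 GB distilled-600M checkpoint is acceptable.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    tok = AutoTokenizer.from_pretrained('facebook/nllb-200-distilled-600M', src_lang='eng_Latn')
    nllb = AutoModelForSeq2SeqLM.from_pretrained('facebook/nllb-200-distilled-600M')
    batch = tok('The weather is nice today.', return_tensors='pt')
    out = nllb.generate(**batch, forced_bos_token_id=tok.convert_tokens_to_ids('fra_Latn'))
    print(tok.decode(out[0], skip_special_tokens=True))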
| 9 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = nn.functional.normalize(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = nn.functional.normalize(lowercase__ )
return torch.mm(lowercase__ , normalized_text_embeds.t() )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = CLIPConfig
SCREAMING_SNAKE_CASE__ : List[str] = ['''CLIPEncoderLayer''']
def __init__( self :str , lowerCAmelCase__ :CLIPConfig ) -> Tuple:
super().__init__(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = CLIPVisionModel(config.vision_config )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase__ )
@torch.no_grad()
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : int = self.vision_model(lowerCAmelCase__ )[1] # pooled_output
__SCREAMING_SNAKE_CASE : Optional[Any] = self.visual_projection(lowerCAmelCase__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : Optional[Any] = cosine_distance(lowerCAmelCase__ , self.special_care_embeds ).cpu().float().numpy()
__SCREAMING_SNAKE_CASE : Optional[Any] = cosine_distance(lowerCAmelCase__ , self.concept_embeds ).cpu().float().numpy()
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : List[Any] = image_embeds.shape[0]
for i in range(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__SCREAMING_SNAKE_CASE : List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
__SCREAMING_SNAKE_CASE : List[str] = special_cos_dist[i][concept_idx]
__SCREAMING_SNAKE_CASE : Any = self.special_care_embeds_weights[concept_idx].item()
__SCREAMING_SNAKE_CASE : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
__SCREAMING_SNAKE_CASE : int = cos_dist[i][concept_idx]
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.concept_embeds_weights[concept_idx].item()
__SCREAMING_SNAKE_CASE : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase__ )
result.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :torch.FloatTensor ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vision_model(lowerCAmelCase__ )[1] # pooled_output
__SCREAMING_SNAKE_CASE : List[str] = self.visual_projection(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = cosine_distance(lowerCAmelCase__ , self.special_care_embeds )
__SCREAMING_SNAKE_CASE : Optional[int] = cosine_distance(lowerCAmelCase__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__SCREAMING_SNAKE_CASE : List[Any] = 0.0
__SCREAMING_SNAKE_CASE : Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
__SCREAMING_SNAKE_CASE : List[str] = torch.any(special_scores > 0 , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = special_care * 0.01
__SCREAMING_SNAKE_CASE : int = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
__SCREAMING_SNAKE_CASE : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
__SCREAMING_SNAKE_CASE : Any = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
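# Toy illustration of the vectorized scoring rule above: cosine similarities
# between normalized embeddings, minus a per-concept threshold; any positive
# entry flags the corresponding image. Shapes and thresholds here are made up.
if __name__ == "__main__":
    image_embeds = nn.functional.normalize(torch.randn(2, 8))
    concept_embeds = nn.functional.normalize(torch.randn(3, 8))
    scores = image_embeds @ concept_embeds.t() - torch.full((3,), 0.5)
    print(torch.any(scores > 0, dim=1))  # one bool per image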
| 9 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
SCREAMING_SNAKE_CASE__ : Dict = '''CIDAS/clipseg-rd64-refined'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''image_segmenter'''
SCREAMING_SNAKE_CASE__ : Tuple = CLIPSegForImageSegmentation
SCREAMING_SNAKE_CASE__ : List[Any] = ['''image''', '''text''']
SCREAMING_SNAKE_CASE__ : str = ['''image''']
def __init__( self :Dict , *lowerCAmelCase__ :str , **lowerCAmelCase__ :str ) -> Tuple:
requires_backends(self , ['''vision'''] )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :"Image" , lowerCAmelCase__ :str ) -> Dict:
return self.pre_processor(text=[label] , images=[image] , padding=lowerCAmelCase__ , return_tensors='''pt''' )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Tuple ) -> Optional[int]:
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model(**lowerCAmelCase__ ).logits
return logits
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE : Tuple = outputs.cpu().detach().numpy()
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
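# Hedged sketch of the same three steps with the released CLIPSeg classes
# (assumptions: network access, PIL installed, and a local `cat.png` to hand).
if __name__ == "__main__":
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
    processor = CLIPSegProcessor.from_pretrained('CIDAS/clipseg-rd64-refined')
    seg_model = CLIPSegForImageSegmentation.from_pretrained('CIDAS/clipseg-rd64-refined')
    inputs = processor(text=['cat'], images=[Image.open('cat.png')], padding=True, return_tensors='pt')
    with torch.no_grad():
        logits = seg_model(**inputs).logits
    mask = (logits.squeeze().numpy() > 0).astype(np.uint8) * 255  # same 0-threshold as above
    Image.fromarray(mask).save('mask.png')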
| 9 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__lowerCAmelCase : List[Any] =datasets.load_iris()
__lowerCAmelCase : Tuple =np.array(data['data'])
__lowerCAmelCase : Dict =np.array(data['target'])
__lowerCAmelCase : List[str] =data['target_names']
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : str =train_test_split(X, y)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return np.linalg.norm(np.array(lowercase__ ) - np.array(lowercase__ ) )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
__SCREAMING_SNAKE_CASE : Optional[int] = zip(lowercase__ , lowercase__ )
# List of distances of all points from the point to be classified
__SCREAMING_SNAKE_CASE : Dict = []
for data_point in data:
__SCREAMING_SNAKE_CASE : Tuple = euclidean_distance(data_point[0] , lowercase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__SCREAMING_SNAKE_CASE : int = [i[1] for i in sorted(lowercase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__SCREAMING_SNAKE_CASE : Any = Counter(lowercase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
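    # Cross-check sketch: scikit-learn's own KNN on the same kind of split
    # should usually agree with the hand-rolled majority vote above.
    from sklearn.neighbors import KNeighborsClassifier
    iris = datasets.load_iris()
    x_tr, _, y_tr, _ = train_test_split(np.array(iris['data']), np.array(iris['target']))
    knn = KNeighborsClassifier(n_neighbors=5).fit(x_tr, y_tr)
    print(iris['target_names'][knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])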
| 9 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : "DiagonalGaussianDistribution"
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = True
@register_to_config
def __init__( self :str , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase__ :Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase__ :Tuple[int] = (64,) , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :str = "silu" , lowerCAmelCase__ :int = 4 , lowerCAmelCase__ :int = 32 , lowerCAmelCase__ :int = 32 , lowerCAmelCase__ :float = 0.1_8215 , ) -> str:
super().__init__()
# pass init params to Encoder
__SCREAMING_SNAKE_CASE : int = Encoder(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , down_block_types=lowerCAmelCase__ , block_out_channels=lowerCAmelCase__ , layers_per_block=lowerCAmelCase__ , act_fn=lowerCAmelCase__ , norm_num_groups=lowerCAmelCase__ , double_z=lowerCAmelCase__ , )
# pass init params to Decoder
__SCREAMING_SNAKE_CASE : Optional[Any] = Decoder(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , up_block_types=lowerCAmelCase__ , block_out_channels=lowerCAmelCase__ , layers_per_block=lowerCAmelCase__ , norm_num_groups=lowerCAmelCase__ , act_fn=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : str = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : str = False
# only relevant if vae tiling is enabled
__SCREAMING_SNAKE_CASE : Any = self.config.sample_size
__SCREAMING_SNAKE_CASE : Tuple = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__SCREAMING_SNAKE_CASE : str = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__SCREAMING_SNAKE_CASE : int = 0.25
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int]=False ) -> List[str]:
if isinstance(lowerCAmelCase__ , (Encoder, Decoder) ):
__SCREAMING_SNAKE_CASE : List[str] = value
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :bool = True ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = use_tiling
def __magic_name__( self :Union[str, Any] ) -> int:
self.enable_tiling(lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = True
def __magic_name__( self :Optional[Any] ) -> int:
__SCREAMING_SNAKE_CASE : Optional[Any] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __magic_name__( self :Tuple ) -> Dict[str, AttentionProcessor]:
__SCREAMING_SNAKE_CASE : Dict = {}
def fn_recursive_add_processors(lowerCAmelCase__ :str , lowerCAmelCase__ :torch.nn.Module , lowerCAmelCase__ :Dict[str, AttentionProcessor] ):
if hasattr(lowerCAmelCase__ , '''set_processor''' ):
__SCREAMING_SNAKE_CASE : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase__ , lowerCAmelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return processors
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase__ )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase__ :str , lowerCAmelCase__ :torch.nn.Module , lowerCAmelCase__ :Tuple ):
if hasattr(lowerCAmelCase__ , '''set_processor''' ):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
module.set_processor(lowerCAmelCase__ )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase__ , lowerCAmelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> List[Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :bool = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
if self.use_slicing and x.shape[0] > 1:
__SCREAMING_SNAKE_CASE : List[Any] = [self.encoder(lowerCAmelCase__ ) for x_slice in x.split(1 )]
__SCREAMING_SNAKE_CASE : Tuple = torch.cat(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : str = self.encoder(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = self.quant_conv(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = DiagonalGaussianDistribution(lowerCAmelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCAmelCase__ )
def __magic_name__( self :str , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.post_quant_conv(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = self.decoder(lowerCAmelCase__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase__ )
@apply_forward_hook
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
__SCREAMING_SNAKE_CASE : Optional[int] = [self._decode(lowerCAmelCase__ ).sample for z_slice in z.split(1 )]
__SCREAMING_SNAKE_CASE : str = torch.cat(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Tuple = self._decode(lowerCAmelCase__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCAmelCase__ )
def __magic_name__( self :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Tuple = min(a.shape[2] , b.shape[2] , lowerCAmelCase__ )
for y in range(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __magic_name__( self :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = min(a.shape[3] , b.shape[3] , lowerCAmelCase__ )
for x in range(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : str = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :bool = True ) -> AutoencoderKLOutput:
__SCREAMING_SNAKE_CASE : Optional[int] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__SCREAMING_SNAKE_CASE : Optional[int] = int(self.tile_latent_min_size * self.tile_overlap_factor )
__SCREAMING_SNAKE_CASE : List[Any] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__SCREAMING_SNAKE_CASE : str = []
for i in range(0 , x.shape[2] , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Dict = []
for j in range(0 , x.shape[3] , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Dict = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__SCREAMING_SNAKE_CASE : Any = self.encoder(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = self.quant_conv(lowerCAmelCase__ )
row.append(lowerCAmelCase__ )
rows.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = []
for i, row in enumerate(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : str = []
for j, tile in enumerate(lowerCAmelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__SCREAMING_SNAKE_CASE : int = self.blend_v(rows[i - 1][j] , lowerCAmelCase__ , lowerCAmelCase__ )
if j > 0:
__SCREAMING_SNAKE_CASE : Optional[int] = self.blend_h(row[j - 1] , lowerCAmelCase__ , lowerCAmelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCAmelCase__ , dim=3 ) )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(lowerCAmelCase__ , dim=2 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DiagonalGaussianDistribution(lowerCAmelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
__SCREAMING_SNAKE_CASE : Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__SCREAMING_SNAKE_CASE : Optional[int] = int(self.tile_sample_min_size * self.tile_overlap_factor )
__SCREAMING_SNAKE_CASE : int = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(0 , z.shape[2] , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : str = []
for j in range(0 , z.shape[3] , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Dict = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__SCREAMING_SNAKE_CASE : List[Any] = self.post_quant_conv(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = self.decoder(lowerCAmelCase__ )
row.append(lowerCAmelCase__ )
rows.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = []
for i, row in enumerate(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Tuple = []
for j, tile in enumerate(lowerCAmelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.blend_v(rows[i - 1][j] , lowerCAmelCase__ , lowerCAmelCase__ )
if j > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.blend_h(row[j - 1] , lowerCAmelCase__ , lowerCAmelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCAmelCase__ , dim=3 ) )
__SCREAMING_SNAKE_CASE : str = torch.cat(lowerCAmelCase__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase__ )
def __magic_name__( self :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
__SCREAMING_SNAKE_CASE : str = sample
__SCREAMING_SNAKE_CASE : str = self.encode(lowerCAmelCase__ ).latent_dist
if sample_posterior:
__SCREAMING_SNAKE_CASE : List[str] = posterior.sample(generator=lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : int = posterior.mode()
__SCREAMING_SNAKE_CASE : str = self.decode(lowerCAmelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase__ )
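# Hedged sketch of exercising the released version of this class with tiling
# enabled (assumptions: network access; `stabilityai/sd-vae-ft-mse` is a
# public VAE checkpoint compatible with `AutoencoderKL`).
if __name__ == "__main__":
    from diffusers import AutoencoderKL
    vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse')
    vae.enable_tiling()  # routes large inputs through the tiled encode/decode paths
    with torch.no_grad():
        posterior = vae.encode(torch.randn(1, 3, 512, 512)).latent_dist
        recon = vae.decode(posterior.sample()).sample
    print(recon.shape)  # torch.Size([1, 3, 512, 512])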
| 9 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def __magic_name__( *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :str ) -> Union[str, Any]:
pass
def _UpperCamelCase ( lowercase__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase : str =(
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __magic_name__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
'''document-question-answering''' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
__SCREAMING_SNAKE_CASE : str = '''What is the placebo?'''
__SCREAMING_SNAKE_CASE : str = [
{
'''image''': load_image(lowerCAmelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : int = '''How many cats are there?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
        # This image contains no detectable text, so layoutlmv2 should fail
        # and return an empty answer.
__SCREAMING_SNAKE_CASE : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Any = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : int = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Tuple = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : str = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : str = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Dict = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : List[str] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[int] = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
pass
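# Hedged sketch of calling the pipeline under test directly (assumptions:
# network access plus the `pytesseract` OCR dependency used for word boxes).
if __name__ == "__main__":
    dqa = pipeline('document-question-answering', model='impira/layoutlm-document-qa')
    print(dqa(image=INVOICE_URL, question='What is the invoice number?', top_k=2))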
| 9 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :int ) -> Dict:
__SCREAMING_SNAKE_CASE : Dict = 0
def __magic_name__( self :Dict ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : Tuple = Path(lowerCAmelCase__ ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE : List[Any] = Path(lowerCAmelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(lowerCAmelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(lowerCAmelCase__ , '''w''' ) )
__SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> str:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : str = Path(lowerCAmelCase__ ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowerCAmelCase__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(lowerCAmelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(lowerCAmelCase__ , '''w''' ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :int ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : List[str] = CLIPConfig()
            # Create a dummy config file with image_processor_type
__SCREAMING_SNAKE_CASE : Tuple = Path(lowerCAmelCase__ ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE : List[Any] = Path(lowerCAmelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(lowerCAmelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(lowerCAmelCase__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ).to_dict()
config_dict.pop('''image_processor_type''' )
__SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(**lowerCAmelCase__ )
# save in new folder
model_config.save_pretrained(lowerCAmelCase__ )
config.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(lowerCAmelCase__ )
# make sure private variable is not incorrectly saved
__SCREAMING_SNAKE_CASE : List[str] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :str ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : Tuple = Path(lowerCAmelCase__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(lowerCAmelCase__ , '''w''' ) , )
__SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :int ) -> Union[str, Any]:
with self.assertRaisesRegex(
lowerCAmelCase__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
__SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __magic_name__( self :Dict ) -> Dict:
with self.assertRaisesRegex(
lowerCAmelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(lowerCAmelCase__ , revision='''aaaaaa''' )
def __magic_name__( self :Tuple ) -> List[str]:
with self.assertRaisesRegex(
lowerCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_from_pretrained_dynamic_image_processor( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def test_new_image_processor_registration( self ):
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir )
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_image_processor , CustomImageProcessor )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict( self ):
        class NewImageProcessor(CLIPImageProcessor ):
            '''simple docstring'''
            is_local = True
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(image_processor , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 9 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
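# A hedged usage note (the symbol names come from the import structure above;
# the package path is an assumption for illustration only): with _LazyModule
# installed in sys.modules, the heavy torch-backed classes are resolved on
# first attribute access instead of at import time, e.g.
#   from transformers.models.vit_msn import ViTMSNConfig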
| 9 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_6_2  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
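# A minimal round-trip sketch of the two helpers above (the numbers are my own
# illustrative values, not from the source): since P*V = n*R*T, computing a
# volume from a pressure and then the pressure back from that volume should
# recover the original value.
def _ideal_gas_round_trip_demo() -> None:
    moles, kelvin, pressure = 1.0, 273.15, 101_325.0
    volume = volume_of_gas_system(moles, kelvin, pressure)
    assert abs(pressure_of_gas_system(moles, kelvin, volume) - pressure) < 1e-6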
if __name__ == "__main__":
from doctest import testmod
testmod()
| 9 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowercase ( AbstractDatasetReader ):
'''simple docstring'''
    def __init__( self , path_or_paths :NestedDataStructureLike[PathLike] , split :Optional[NamedSplit] = None , features :Optional[Features] = None , cache_dir :str = None , keep_in_memory :bool = False , streaming :bool = False , num_proc :Optional[int] = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
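# A hedged usage sketch (the file name below is a made-up example, not from the
# source): the reader wraps the packaged `text` builder, so reading a plain-text
# file yields one example per line.
#   reader = _lowercase("corpus.txt", split=None)
#   dataset = reader.read()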
| 9 | 1 |
from __future__ import annotations
class Matrix:
    '''simple docstring'''
    def __init__( self , rows :list[list[int]] ) -> None:
        error = TypeError(
            '''Matrices must be formed from a list of zero or more lists containing at '''
            '''least one and the same number of values, each of which must be of type '''
            '''int or float.''' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ) -> int:
        return len(self.rows )
    @property
    def num_columns( self ) -> int:
        return len(self.rows[0] )
    @property
    def order( self ) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ) -> bool:
        return self.order[0] == self.order[1]
    def identity( self ) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self ) -> bool:
return bool(self.determinant() )
    def get_minor( self , row :int , column :int ) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self , row :int , column :int ) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
        return self.adjugate() * (1 / determinant)
def __repr__( self :Any ) -> str:
return str(self.rows )
def __str__( self :List[Any] ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                    '''[''' + '''. '''.join([str(value ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
    def add_row( self , row :list[int] , position :int | None = None ) -> None:
        type_error = TypeError('''Row must be a list containing all ints and/or floats''' )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                '''Row must be equal in length to the other rows in the matrix''' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column :list[int] , position :int | None = None ) -> None:
        type_error = TypeError(
            '''Column must be a list containing all ints and/or floats''' )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                '''Column must be equal in length to the other columns in the matrix''' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other :object ) -> bool:
        if not isinstance(other , Matrix ):
return NotImplemented
return self.rows == other.rows
    def __ne__( self , other :object ) -> bool:
return not self == other
def __neg__( self :Any ) -> Matrix:
return self * -1
    def __add__( self , other :Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self , other :Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self , other :Matrix | int | float ) -> Matrix:
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    '''The number of columns in the first matrix must '''
                    '''be equal to the number of rows in the second''' )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                '''A Matrix can only be multiplied by an int, float, or another matrix''' )
    def __pow__( self , other :int ) -> Matrix:
        if not isinstance(other , int ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls , row :list[int] , column :list[int] ) -> int:
        return sum(row[i] * column[i] for i in range(len(row ) ) )
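# A quick sanity sketch of the class above (the matrices are my own example
# values, not from the source): 2x2 addition, multiplication, and determinant
# behave as expected.
def _matrix_demo() -> None:
    a = Matrix([[1, 2], [3, 4]])
    b = Matrix([[5, 6], [7, 8]])
    assert a.determinant() == -2
    assert (a + b).rows == [[6, 8], [10, 12]]
    assert (a * b).rows == [[19, 22], [43, 50]]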
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
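# A minimal illustration of the helper above (the dict is my own example):
# rename_key pops the old key and re-inserts its value under the new name.
def _rename_key_demo() -> None:
    sd = {"visual_encoder.cls_token": 1}
    rename_key(sd, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
    assert sd == {"vision_model.embeddings.class_embedding": 1}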
def read_in_q_v_bias(state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
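# A shape-level illustration of the bias surgery above (the dimensions are my
# own example): q and v biases of size d are packed into one 3*d qkv bias, with
# the k slot left at zero.
def _qkv_bias_demo() -> None:
    d = 4
    q_bias, v_bias = torch.ones(d), torch.full((d,), 2.0)
    qkv = torch.cat((q_bias, torch.zeros_like(v_bias), v_bias))
    assert qkv.shape == (3 * d,) and qkv[d : 2 * d].abs().sum() == 0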
def get_blipa_config(model_name , eos_token_id=None ):
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
    )
    eos_token_id = tokenizer('''\n''' , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
        '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
        '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
        '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
        '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
        '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
        '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''' )
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print('''Done!''' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''Qformer.bert''' ):
            key = key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            key = key.replace('''self''' , '''attention''' )
        if "opt_proj" in key:
            key = key.replace('''opt_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            key = key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''opt''' ):
            key = key.replace('''opt''' , '''language''' )
        if key.startswith('''t5''' ):
            key = key.replace('''t5''' , '''language''' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['''eval'''](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='''pt''' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print('''First values of original logits:''' , original_logits[0, :3, :3] )
    print('''First values of HF logits:''' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
    print('''Looks ok!''' )
    print('''Generating a caption...''' )
    prompt = ''''''
    input_ids = tokenizer(prompt , return_tensors='''pt''' ).input_ids.to(device )
    original_outputs = original_model.generate({'''image''': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('''Original generation:''' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''' , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
__lowerCAmelCase : Tuple =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 | 1 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError('''Input must be a positive integer''' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
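# A small illustrative check of the sieve above (the bound 30 is my own
# example): the result should be exactly the primes up to the bound.
def _sieve_demo() -> None:
    assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]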
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 9 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt-neox-20b': 2_0_4_8,
}
class _lowercase ( PreTrainedTokenizerFast ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation :"Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
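# A hedged usage sketch (the checkpoint id is the one referenced in the map
# above; the behavior assumed here is the standard fast-tokenizer API):
#   tok = _lowercase.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("hello world").input_ids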
| 9 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
'''simple docstring'''
    def __init__( self , vertices :set[int] , edges :Mapping[EdgeT, int] ) -> None:
        self.vertices : set[int] = vertices
        self.edges : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge :EdgeT , weight :int ) -> None:
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ) -> Graph:
        subgraph : Graph = Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution(filename: str = "p107_network.txt" ) -> int:
    script_directory : str = os.path.abspath(os.path.dirname(__file__ ) )
    filepath : str = os.path.join(script_directory , filename )
    edges : dict[EdgeT, int] = {}
    with open(filepath ) as f:
        data : list[str] = f.read().strip().split('''\n''' )
    adjacency_matrix = [line.split(''',''' ) for line in data]
    for edgea in range(1 , len(adjacency_matrix ) ):
        for edgeb in range(edgea ):
            if adjacency_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjacency_matrix[edgea][edgeb] )
    graph : Graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph : Graph = graph.prims_algorithm()
    initial_total : int = sum(graph.edges.values() )
    optimal_total : int = sum(subgraph.edges.values() )
    return initial_total - optimal_total
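# A tiny illustrative check of the classes above (the triangle graph is my own
# example): Prim's algorithm keeps the two cheapest of the three edges.
def _prims_demo() -> None:
    g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 10})
    mst = g.prims_algorithm()
    assert sum(mst.edges.values()) == 3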
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION ='\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'
_DESCRIPTION ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute( self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 9 | 1 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION ='\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION ='\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION ='\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
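# A tiny sanity sketch for the helper above (the arrays are my own example):
# with 2 labels and no ignored pixels, a perfect prediction makes intersection
# equal union, and both equal the per-label pixel counts.
def _iou_helper_demo() -> None:
    seg = np.array([[0, 1], [1, 1]])
    inter, union, pred_area, label_area = intersect_and_union(
        seg, seg, num_labels=2, ignore_index=255)
    assert (inter == union).all() and (inter == np.array([1.0, 3.0])).all()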
def total_intersect_and_union(results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['''mean_iou'''] = np.nanmean(iou )
    metrics['''mean_accuracy'''] = np.nanmean(acc )
    metrics['''overall_accuracy'''] = all_acc
    metrics['''per_category_iou'''] = iou
    metrics['''per_category_accuracy'''] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
    def _compute( self , predictions , references , num_labels :int , ignore_index :bool , nan_to_num :Optional[int] = None , label_map :Optional[Dict[int, int]] = None , reduce_labels :bool = False , ):
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 9 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : str = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE__ : str = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['''input_ids'''] = inputs_dict['''labels''']
                inputs_dict['''token_type_ids'''] = inputs_dict['''labels''']
                inputs_dict['''mc_token_ids'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['''mc_labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 9 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution ):
    '''simple docstring'''
    def __init__( self , base_distribution :Distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        return self.variance.sqrt()
class ParameterProjection(nn.Module ):
    '''simple docstring'''
    def __init__( self , in_features :int , args_dim :Dict[str, int] , domain_map :Callable[..., Tuple[torch.Tensor]] , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x :torch.Tensor ) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer(nn.Module ):
    '''simple docstring'''
    def __init__( self , function ):
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        return self.function(x , *args )
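# A small usage sketch of the projection module above (the dimensions and the
# lambda are my own example values): project a hidden vector to unconstrained
# per-parameter tensors, then squeeze them through a domain map.
def _projection_demo() -> None:
    proj = ParameterProjection(
        in_features=8,
        args_dim={"loc": 1, "scale": 1},
        domain_map=LambdaLayer(lambda loc, scale: (loc.squeeze(-1), scale.squeeze(-1))),
    )
    loc, scale = proj(torch.randn(4, 8))
    assert loc.shape == scale.shape == (4,)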
class DistributionOutput:
    '''simple docstring'''
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim :int = 1 ) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc :Optional[torch.Tensor] = None , scale :Optional[torch.Tensor] = None , ) -> Distribution:
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ) -> int:
        return len(self.event_shape )
    @property
    def value_in_support( self ) -> float:
        return 0.0
    def get_parameter_projection( self , in_features :int ) -> nn.Module:
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args :torch.Tensor ):
        raise NotImplementedError()
    @staticmethod
    def squareplus( x :torch.Tensor ) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output class."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal distribution output class."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative binomial distribution output class."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
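

# Usage sketch (added for illustration; not part of the original module).
# Shows how a DistributionOutput ties a projection head to a torch distribution.
# The feature size (8) and batch size (4) below are arbitrary assumptions.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=8)
    features = torch.randn(4, 8)        # (batch, features)
    distr_args = projection(features)   # (df, loc, scale), each of shape (batch,)
    distribution = output.distribution(distr_args)
    print(distribution.sample().shape)  # torch.Size([4])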
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
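

# Usage sketch (added for illustration; not part of the original module).
# A hypothetical caller that still accepts a deprecated `height` kwarg and warns
# once; the version string "99.0.0" is an arbitrary far-future placeholder.
def resize(image, size=None, **kwargs):
    height = deprecate("height", "99.0.0", "Use `size` instead.", take_from=kwargs)
    return size if size is not None else height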
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))

    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
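

# Usage sketch (added for illustration; not part of the original module).
# The pipeline is normally constructed through the `pipeline` factory; the
# checkpoint name below is just an example.
if __name__ == "__main__":
    from transformers import pipeline

    fill_masker = pipeline("fill-mask", model="distilroberta-base")
    for prediction in fill_masker("Paris is the <mask> of France.", top_k=3):
        print(round(prediction["score"], 3), prediction["token_str"], prediction["sequence"])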
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
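

# Usage sketch (added for illustration; not part of the original module).
# Patches `os.path.join` as seen from an object holding a reference to `os`;
# `_FakeModule` is a stand-in for a real module's global namespace.
if __name__ == "__main__":
    import os

    class _FakeModule:
        os = os

    mod = _FakeModule()
    with patch_submodule(mod, "os.path.join", lambda *parts: "/patched"):
        print(mod.os.path.join("a", "b"))  # -> "/patched"
    print(mod.os.path.join("a", "b"))  # original os.path.join restored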
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1
) -> complex:
    """Finds the root of `function` from `starting_point` onwards via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
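

# Usage sketch (added for illustration; not part of the original module).
# Outcomes depend on the installed environment; the pins below are examples.
if __name__ == "__main__":
    require_version("numpy>=1.17")           # passes, or raises ImportError with the hint appended
    require_version_core("packaging>=20.0")  # same check, with the core-specific hint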
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
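
    # Usage sketch (added for illustration; not part of the original module).
    # Water at ~20 °C: density ≈ 998 kg/m^3, bulk modulus ≈ 2.15e9 Pa -> about 1467 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))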
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
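
    # Usage sketch (added for illustration; not part of the original module).
    # Two 4-ohm resistors: 2.0 ohms in parallel, 8.0 ohms in series.
    print(resistor_parallel([4.0, 4.0]))  # 2.0
    print(resistor_series([4.0, 4.0]))    # 8.0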
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
__SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = __SCREAMING_SNAKE_CASE  # rebind the dict built above under a readable name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3"
        )
from ..utils import DummyObject, requires_backends
class _lowercase(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
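# Note: instantiating this placeholder without the `keras_nlp` backend installed makes
# `requires_backends` raise an ImportError that names the missing dependency and how to install it.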
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
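# Note: the shape assertions above pin down the processor's output layout,
# (batch_size, num_frames, num_channels, crop_height, crop_width): frames are
# stacked per video after resizing and center-cropping to `crop_size`.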
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
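# Example behavior (a sketch, not part of the original script):
#   rec_insertion_sort([3, 1, 2], 3) sorts the list in place to [1, 2, 3];
#   entering "5 2 4 1" at the prompt prints [1, 2, 4, 5].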
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
# fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
# Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
# fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
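# For reference, a minimal direct use of the extractor mirroring the test above
# (illustrative input, not from the original file):
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><h1>Hi</h1></body></html>")
#   # encoding.nodes -> [["Hi"]], encoding.xpaths -> [["/html/body/h1"]]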
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
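# Note: with `do_sample=False`, `generate` decodes greedily, so the continuation is
# deterministic; the ids in `expected_output_ids` decode to the sentence quoted in
# the inline comment above.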
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
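# Note: `move_added_token` relocates an added token to an explicit id (here 1) instead of
# leaving it appended at the end of the vocab; the assertions above check that the token
# round-trips through encode/decode at its new id without being duplicated.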
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
    DECODER_CONVERSION_MAPPING = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
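# Example invocation (script name and paths below are illustrative, not from the original file):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large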
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
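# Sanity check for the example above (not part of the original file):
# the intersection is {"c", "d", "e"} (3 elements) and the union has 8 elements,
# so the call prints 0.375; with alternative_union=True the union is 5 + 6 = 11, giving 3/11.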
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
@staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
@staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name
@staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
@classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info
@classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)
@classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
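# Minimal usage sketch (hypothetical subclass, not part of the original module):
#
#   class MyNamer(TrialShortNamer):
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   MyNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
#   returns a name like "hp_lr0.0001": only non-default values are encoded under
#   shortened keys, and parse_repr() maps such a name back to a full parameter dict.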
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
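# Quick sanity note (not part of the original metric file): when the English and
# Indic sentence vectors are identical, every row's nearest neighbour under cosine
# distance is itself, so precision_at_10 returns 1.0, matching the docstring example.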
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
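# Despite the name, this returns cosine *similarity*: both inputs are L2-normalized,
# so the matrix product yields an (n_images, n_concepts) matrix of cosine similarities.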
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
@torch.no_grad()
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :torch.FloatTensor ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vision_model(lowerCAmelCase__ )[1] # pooled_output
__SCREAMING_SNAKE_CASE : List[str] = self.visual_projection(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = cosine_distance(lowerCAmelCase__ , self.special_care_embeds )
__SCREAMING_SNAKE_CASE : Optional[int] = cosine_distance(lowerCAmelCase__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__SCREAMING_SNAKE_CASE : List[Any] = 0.0
__SCREAMING_SNAKE_CASE : Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
__SCREAMING_SNAKE_CASE : List[str] = torch.any(special_scores > 0 , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = special_care * 0.01
__SCREAMING_SNAKE_CASE : int = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
__SCREAMING_SNAKE_CASE : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
__SCREAMING_SNAKE_CASE : Any = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
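# Hedged sketch, not part of the original fragment: `cosine_distance` is used
# above but never defined in this excerpt. An implementation consistent with its
# usage (row-wise cosine similarity between image and concept embeddings):
import torch
from torch import nn


def cosine_distance(image_embeds, text_embeds):
    # L2-normalize both embedding matrices, then take all pairwise dot products
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())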
| 9 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Tuple ) -> Union[str, Any]:
        # sanity-check that the subclass implements the constraint interface consistently
self.test()
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = 0
__SCREAMING_SNAKE_CASE : Tuple = False
while not completed:
if counter == 1:
self.reset()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.advance()
if not self.does_advance(lowerCAmelCase__ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.update(lowerCAmelCase__ )
counter += 1
if counter > 10_000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __magic_name__( self :Optional[int] ) -> str:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __magic_name__( self :str , lowerCAmelCase__ :int ) -> Optional[int]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __magic_name__( self :Dict , lowerCAmelCase__ :int ) -> int:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __magic_name__( self :List[str] ) -> Tuple:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __magic_name__( self :Tuple ) -> Tuple:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Dict=False ) -> Tuple:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :List[int] ) -> Optional[Any]:
super(lowerCAmelCase__ , self ).__init__()
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or len(lowerCAmelCase__ ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f'''`token_ids` has to be a list of positive integers, but is {token_ids}.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = token_ids
__SCREAMING_SNAKE_CASE : Optional[Any] = len(self.token_ids )
__SCREAMING_SNAKE_CASE : Tuple = -1 # the index of the currently fulfilled step
__SCREAMING_SNAKE_CASE : int = False
def __magic_name__( self :List[str] ) -> List[str]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __magic_name__( self :Dict , lowerCAmelCase__ :int ) -> List[str]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(lowerCAmelCase__ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __magic_name__( self :int , lowerCAmelCase__ :int ) -> Tuple:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(lowerCAmelCase__ )}''' )
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : List[str] = False
if self.does_advance(lowerCAmelCase__ ):
self.fulfilled_idx += 1
__SCREAMING_SNAKE_CASE : Dict = True
if self.fulfilled_idx == (self.seqlen - 1):
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : int = completed
else:
# failed to make progress.
__SCREAMING_SNAKE_CASE : List[str] = True
self.reset()
return stepped, completed, reset
def __magic_name__( self :Any ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : str = 0
def __magic_name__( self :Any ) -> str:
return self.seqlen - (self.fulfilled_idx + 1)
def __magic_name__( self :str , lowerCAmelCase__ :Optional[Any]=False ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = PhrasalConstraint(self.token_ids )
if stateful:
__SCREAMING_SNAKE_CASE : Any = self.seqlen
__SCREAMING_SNAKE_CASE : Dict = self.fulfilled_idx
__SCREAMING_SNAKE_CASE : List[str] = self.completed
return new_constraint
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCAmelCase__ :List[List[int]] , lowerCAmelCase__ :List[Any]=True ) -> str:
__SCREAMING_SNAKE_CASE : Any = max([len(lowerCAmelCase__ ) for one in nested_token_ids] )
__SCREAMING_SNAKE_CASE : int = {}
for token_ids in nested_token_ids:
__SCREAMING_SNAKE_CASE : int = root
for tidx, token_id in enumerate(lowerCAmelCase__ ):
if token_id not in level:
__SCREAMING_SNAKE_CASE : Any = {}
__SCREAMING_SNAKE_CASE : Dict = level[token_id]
if no_subsets and self.has_subsets(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f''' {nested_token_ids}.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = root
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : int = self.trie
for current_token in current_seq:
__SCREAMING_SNAKE_CASE : List[Any] = start[current_token]
__SCREAMING_SNAKE_CASE : Optional[Any] = list(start.keys() )
return next_tokens
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = self.next_tokens(lowerCAmelCase__ )
return len(lowerCAmelCase__ ) == 0
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = list(root.values() )
if len(lowerCAmelCase__ ) == 0:
return 1
else:
return sum([self.count_leaves(lowerCAmelCase__ ) for nn in next_nodes] )
def __magic_name__( self :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = self.count_leaves(lowerCAmelCase__ )
return len(lowerCAmelCase__ ) != leaf_count
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :List[List[int]] ) -> int:
super(lowerCAmelCase__ , self ).__init__()
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or len(lowerCAmelCase__ ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
__SCREAMING_SNAKE_CASE : str = DisjunctiveTrie(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = nested_token_ids
__SCREAMING_SNAKE_CASE : Optional[Any] = self.trie.max_height
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Tuple = False
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.trie.next_tokens(self.current_seq )
if len(lowerCAmelCase__ ) == 0:
return None
else:
return token_list
def __magic_name__( self :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCAmelCase__ )}''' )
__SCREAMING_SNAKE_CASE : Tuple = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __magic_name__( self :Any , lowerCAmelCase__ :int ) -> Union[str, Any]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCAmelCase__ )}''' )
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(lowerCAmelCase__ ):
self.current_seq.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = True
else:
__SCREAMING_SNAKE_CASE : List[str] = True
self.reset()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.trie.reached_leaf(self.current_seq )
__SCREAMING_SNAKE_CASE : Any = completed
return stepped, completed, reset
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
def __magic_name__( self :Optional[Any] ) -> Tuple:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __magic_name__( self :Any , lowerCAmelCase__ :Tuple=False ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = DisjunctiveConstraint(self.token_ids )
if stateful:
__SCREAMING_SNAKE_CASE : int = self.seqlen
__SCREAMING_SNAKE_CASE : List[Any] = self.current_seq
__SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[Constraint] ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = constraints
# max # of steps required to fulfill a given constraint
__SCREAMING_SNAKE_CASE : Optional[Any] = max([c.seqlen for c in constraints] )
__SCREAMING_SNAKE_CASE : Tuple = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __magic_name__( self :int ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : List[str] = [constraint.copy(stateful=lowerCAmelCase__ ) for constraint in self.constraints]
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __magic_name__( self :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__SCREAMING_SNAKE_CASE : Union[str, Any] = constraint.advance()
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
token_list.append(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
token_list.extend(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Tuple = self.inprogress_constraint.advance()
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
token_list.append(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
token_list.extend(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) == 0:
return None
else:
return token_list
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Optional[List[int]] ) -> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = self.add(lowerCAmelCase__ )
                # the entire list of constraints is fulfilled
if self.completed:
break
def __magic_name__( self :str , lowerCAmelCase__ :int ) -> Any:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = False, False
if self.completed:
__SCREAMING_SNAKE_CASE : Dict = True
__SCREAMING_SNAKE_CASE : Dict = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
            # job, simply update the state
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.update(lowerCAmelCase__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : int = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__SCREAMING_SNAKE_CASE : int = None
if len(self.pending_constraints ) == 0:
# we're done!
__SCREAMING_SNAKE_CASE : List[Any] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = pending_constraint.update(lowerCAmelCase__ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = None
if not complete and stepped:
__SCREAMING_SNAKE_CASE : Dict = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__SCREAMING_SNAKE_CASE : Tuple = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__SCREAMING_SNAKE_CASE : Dict = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __magic_name__( self :int , lowerCAmelCase__ :List[Any]=True ) -> List[str]:
        __SCREAMING_SNAKE_CASE : Tuple = ConstraintListState(self.constraints )  # we never actually mutate the self.constraints objects
        # throughout this process, so the copy starts at its initialization state.
if stateful:
__SCREAMING_SNAKE_CASE : Dict = [
constraint.copy(stateful=lowerCAmelCase__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.copy(stateful=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
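# Hedged usage sketch, added for illustration: the token ids are made up and the
# name `PhrasalConstraint` is assumed from the module's own reference in copy()
# above. It shows how one phrasal constraint is fulfilled step by step.
def _demo_phrasal_constraint():
    constraint = PhrasalConstraint([5, 9, 2])
    taken = []
    while not constraint.completed:
        token_id = constraint.advance()  # the next token id the phrase requires
        stepped, completed, reset = constraint.update(token_id)
        taken.append(token_id)
    return taken  # -> [5, 9, 2], with constraint.completed now True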
| 9 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__lowerCAmelCase : List[Any] =datasets.load_iris()
__lowerCAmelCase : Tuple =np.array(data['data'])
__lowerCAmelCase : Dict =np.array(data['target'])
__lowerCAmelCase : List[str] =data['target_names']
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : str =train_test_split(X, y)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return np.linalg.norm(np.array(lowercase__ ) - np.array(lowercase__ ) )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
__SCREAMING_SNAKE_CASE : Optional[int] = zip(lowercase__ , lowercase__ )
# List of distances of all points from the point to be classified
__SCREAMING_SNAKE_CASE : Dict = []
for data_point in data:
__SCREAMING_SNAKE_CASE : Tuple = euclidean_distance(data_point[0] , lowercase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__SCREAMING_SNAKE_CASE : int = [i[1] for i in sorted(lowercase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__SCREAMING_SNAKE_CASE : Any = Counter(lowercase__ ).most_common(1 )[0][0]
return classes[result]
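# Hedged alternative, not in the original script: the same k-nearest-neighbour
# vote as `classifier` above, vectorized with numpy instead of a Python loop.
def classifier_vectorized(train_data, train_target, classes, point, k=5):
    # distance from `point` to every training sample in one broadcast
    dists = np.linalg.norm(np.asarray(train_data) - np.asarray(point), axis=1)
    # labels of the k closest samples, then a majority vote
    top_k_labels = np.asarray(train_target)[np.argsort(dists)[:k]]
    return classes[Counter(top_k_labels).most_common(1)[0][0]]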
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 9 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : str ={
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''decision_transformer'''
SCREAMING_SNAKE_CASE__ : Dict = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : Any = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Dict=17 , lowerCAmelCase__ :Optional[Any]=4 , lowerCAmelCase__ :List[Any]=128 , lowerCAmelCase__ :Any=4_096 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :int=1 , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[Any]=50_256 , lowerCAmelCase__ :List[Any]=50_256 , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :List[Any]=False , **lowerCAmelCase__ :Optional[Any] , ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = state_dim
__SCREAMING_SNAKE_CASE : Any = act_dim
__SCREAMING_SNAKE_CASE : str = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = max_ep_len
__SCREAMING_SNAKE_CASE : Optional[int] = action_tanh
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : Dict = n_positions
__SCREAMING_SNAKE_CASE : Optional[int] = n_layer
__SCREAMING_SNAKE_CASE : Optional[Any] = n_head
__SCREAMING_SNAKE_CASE : List[str] = n_inner
__SCREAMING_SNAKE_CASE : str = activation_function
__SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
__SCREAMING_SNAKE_CASE : Dict = embd_pdrop
__SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
__SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_range
__SCREAMING_SNAKE_CASE : Any = scale_attn_weights
__SCREAMING_SNAKE_CASE : int = use_cache
__SCREAMING_SNAKE_CASE : int = scale_attn_by_inverse_layer_idx
__SCREAMING_SNAKE_CASE : Optional[Any] = reorder_and_upcast_attn
__SCREAMING_SNAKE_CASE : int = bos_token_id
__SCREAMING_SNAKE_CASE : Dict = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
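# Hedged usage sketch: the exported name `DecisionTransformerConfig` is inferred
# from the model_type above, and the field values below are illustrative.
# config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=128)
# config.n_head, config.max_ep_len  # -> (1, 4096), the defaults from __init__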
| 9 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def __magic_name__( *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :str ) -> Union[str, Any]:
pass
def _UpperCamelCase ( lowercase__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase : str =(
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __magic_name__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
'''document-question-answering''' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
__SCREAMING_SNAKE_CASE : str = '''What is the placebo?'''
__SCREAMING_SNAKE_CASE : str = [
{
'''image''': load_image(lowerCAmelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : int = '''How many cats are there?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
        # Tesseract detects no text in this image, so layoutlmv2 should fail
        # and return an empty answer.
__SCREAMING_SNAKE_CASE : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Any = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : int = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Tuple = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : str = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : str = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Dict = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : List[str] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[int] = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
pass
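# Hedged end-to-end sketch mirroring the slow tests above; the checkpoint and
# question come from the tests themselves, and this is illustration, not a test.
# dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)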
| 9 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :int ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = inspect.getfile(accelerate.test_utils )
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
__SCREAMING_SNAKE_CASE : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __magic_name__( self :Optional[int] ) -> Any:
print(f'''Found {torch.cuda.device_count()} devices.''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__( self :Optional[int] ) -> Tuple:
print(f'''Found {torch.cuda.device_count()} devices.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__( self :Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__( self :str ) -> int:
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
__SCREAMING_SNAKE_CASE : Dict = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
__lowerCAmelCase : Dict =Accelerator()
__lowerCAmelCase : Any =(accelerator.state.process_index + 2, 1_0)
__lowerCAmelCase : List[Any] =torch.randint(0, 1_0, shape).to(accelerator.device)
__lowerCAmelCase : Tuple =''
__lowerCAmelCase : Union[str, Any] =accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__lowerCAmelCase : Union[str, Any] =accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__lowerCAmelCase : Optional[int] =accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 9 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Any ={'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
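# Hedged usage note: with the lazy module in place, importing the package is
# cheap and the model classes only resolve (and require torch) on first access.
# The checkpoint name below is illustrative.
# from transformers import ViTMSNForImageClassification
# model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")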
| 9 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCAmelCase : Tuple =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCAmelCase : Any =1_2_8_0_2_2
__lowerCAmelCase : List[Any] =1_2_8_0_2_8
@require_sentencepiece
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MaMaaaTokenizer
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Dict = True
def __magic_name__( self :Dict ) -> Any:
super().setUp()
__SCREAMING_SNAKE_CASE : int = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
__SCREAMING_SNAKE_CASE : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
__SCREAMING_SNAKE_CASE : List[str] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__( self :Union[str, Any] , **lowerCAmelCase__ :List[str] ) -> List[str]:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :str , lowerCAmelCase__ :Tuple ) -> Any:
return (
"This is a test",
"This is a test",
)
def __magic_name__( self :List[str] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''</s>'''
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def __magic_name__( self :Optional[Any] ) -> int:
pass
def __magic_name__( self :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
__SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''This is a test''' )
@slow
def __magic_name__( self :Optional[int] ) -> List[Any]:
# fmt: off
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''facebook/m2m100_418M'''
SCREAMING_SNAKE_CASE__ : str = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
SCREAMING_SNAKE_CASE__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : int = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def __magic_name__( cls :Dict ) -> int:
__SCREAMING_SNAKE_CASE : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
__SCREAMING_SNAKE_CASE : Dict = 1
return cls
def __magic_name__( self :int ) -> Dict:
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128_063 )
def __magic_name__( self :Dict ) -> Any:
__SCREAMING_SNAKE_CASE : Any = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , lowerCAmelCase__ )
def __magic_name__( self :str ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''en'''
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Optional[int]:
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
__SCREAMING_SNAKE_CASE : Optional[int] = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def __magic_name__( self :str ) -> int:
__SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def __magic_name__( self :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = '''en'''
__SCREAMING_SNAKE_CASE : Dict = '''fr'''
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
__SCREAMING_SNAKE_CASE : List[str] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __magic_name__( self :Dict ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Dict = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__SCREAMING_SNAKE_CASE : int = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __magic_name__( self :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__SCREAMING_SNAKE_CASE : Optional[int] = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __magic_name__( self :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Dict = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128_022, 58, 4_183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128_006,
} , )
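# Hedged sketch of `shift_tokens_right` as exercised above. The signature comes
# from the import at the top of this file; the body below is the standard
# seq2seq right-shift, written out for illustration rather than copied verbatim.
import torch


def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()  # shift every token one step right
    shifted[:, 0] = decoder_start_token_id  # the decoder always starts from this token
    shifted.masked_fill_(shifted == -100, pad_token_id)  # ignored label slots -> pad
    return shifted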
| 9 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :NestedDataStructureLike[PathLike] , lowerCAmelCase__ :Optional[NamedSplit] = None , lowerCAmelCase__ :Optional[Features] = None , lowerCAmelCase__ :str = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[int] = None , **lowerCAmelCase__ :Optional[int] , ) -> Tuple:
super().__init__(
lowerCAmelCase__ , split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[str] = path_or_paths if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else {self.split: path_or_paths}
__SCREAMING_SNAKE_CASE : int = Text(
cache_dir=lowerCAmelCase__ , data_files=lowerCAmelCase__ , features=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __magic_name__( self :Dict ) -> Tuple:
# Build iterable dataset
if self.streaming:
__SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
__SCREAMING_SNAKE_CASE : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
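# Hedged usage sketch (the data file path is illustrative): the reader above
# backs the public `load_dataset("text", ...)` entry point, e.g.
# from datasets import load_dataset
# ds = load_dataset("text", data_files="corpus.txt", split="train")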
| 9 | 1 |
from typing import Any
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
_validation(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
# Creates data structures and fill initial step
__SCREAMING_SNAKE_CASE : dict = {}
__SCREAMING_SNAKE_CASE : dict = {}
for state in states_space:
__SCREAMING_SNAKE_CASE : List[Any] = observations_space[0]
__SCREAMING_SNAKE_CASE : str = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__SCREAMING_SNAKE_CASE : List[Any] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : List[Any] = observations_space[o]
__SCREAMING_SNAKE_CASE : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__SCREAMING_SNAKE_CASE : int = ''''''
__SCREAMING_SNAKE_CASE : Any = -1
for k_state in states_space:
__SCREAMING_SNAKE_CASE : int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__SCREAMING_SNAKE_CASE : Tuple = probability
__SCREAMING_SNAKE_CASE : Union[str, Any] = k_state
# Update probabilities and pointers dicts
__SCREAMING_SNAKE_CASE : str = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = arg_max
# The final observation
__SCREAMING_SNAKE_CASE : Union[str, Any] = observations_space[len(lowercase__ ) - 1]
# argmax for given final observation
__SCREAMING_SNAKE_CASE : Tuple = ''''''
__SCREAMING_SNAKE_CASE : Optional[int] = -1
for k_state in states_space:
__SCREAMING_SNAKE_CASE : Dict = probabilities[(k_state, final_observation)]
if probability > max_probability:
__SCREAMING_SNAKE_CASE : int = probability
__SCREAMING_SNAKE_CASE : Optional[int] = k_state
__SCREAMING_SNAKE_CASE : Optional[int] = arg_max
# Process pointers backwards
__SCREAMING_SNAKE_CASE : List[Any] = last_state
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for o in range(len(lowercase__ ) - 1 , -1 , -1 ):
result.append(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = pointers[previous, observations_space[o]]
result.reverse()
return result
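# Worked example, the classic healthy/fever HMM; the decoder above is assumed
# under its intended name `viterbi`, and the numbers are the textbook ones:
# observations = ["normal", "cold", "dizzy"]
# states = ["Healthy", "Fever"]
# start_p = {"Healthy": 0.6, "Fever": 0.4}
# trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#            "Fever": {"Healthy": 0.4, "Fever": 0.6}}
# emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#           "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# viterbi(observations, states, start_p, trans_p, emit_p)
# -> ['Healthy', 'Healthy', 'Fever']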
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
_validate_not_empty(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
_validate_lists(lowercase__ , lowercase__ )
_validate_dicts(
lowercase__ , lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
_validate_list(lowercase__ , '''observations_space''' )
_validate_list(lowercase__ , '''states_space''' )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if not isinstance(_object , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = F'''{var_name} must be a list'''
raise ValueError(lowercase__ )
else:
for x in _object:
if not isinstance(lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = F'''{var_name} must be a list of strings'''
raise ValueError(lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , ):
_validate_dict(lowercase__ , '''initial_probabilities''' , lowercase__ )
_validate_nested_dict(lowercase__ , '''transition_probabilities''' )
_validate_nested_dict(lowercase__ , '''emission_probabilities''' )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
_validate_dict(_object , lowercase__ , lowercase__ )
for x in _object.values():
_validate_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ):
if not isinstance(_object , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = F'''{var_name} must be a dict'''
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object ):
__SCREAMING_SNAKE_CASE : Dict = F'''{var_name} all keys must be strings'''
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object.values() ):
__SCREAMING_SNAKE_CASE : Tuple = '''nested dictionary ''' if nested else ''''''
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(lowercase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 9 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Dict = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
print('''Looks ok!''' )
print('''Generating a caption...''' )
__SCREAMING_SNAKE_CASE : Any = ''''''
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowercase__ , return_tensors='''pt''' ).input_ids.to(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = original_model.generate({'''image''': original_pixel_values} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = hf_model.generate(
lowercase__ , lowercase__ , do_sample=lowercase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.shape[1]
__SCREAMING_SNAKE_CASE : Any = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = [text.strip() for text in output_text]
print('''HF generation:''' , lowercase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
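
# Example invocation (the script file name and output path are illustrative):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted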
| 9 | 1 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>",
        pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>",
        padding_side="left", **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token,
            pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token,
            padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
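
# A quick illustration of the greedy longest-match in WordpieceTokenizer.tokenize
# above (the toy vocabulary is made up for this sketch):
#     tok = WordpieceTokenizer(vocab={"un", "unhappy", "hap", "py"}, unk_token="<unk>")
#     tok.tokenize("unhappyq")  # -> ["unhappy", "<unk>"]
# "unhappy" wins because the inner loop shrinks `end` until the longest prefix
# present in the vocab is found; "q" matches nothing and degrades to the
# unknown token.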
| 9 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
        bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token,
            eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
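
# Usage sketch (checkpoint name taken from the map above; requires network
# access on first use):
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     tok("Hello world").input_ids
# With add_prefix_space=True the pre-tokenizer treats the first word as if it
# followed a space, which changes how leading tokens are merged.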
| 9 | 1 |
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call_html(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , lowerCAmelCase__ )
self.assertEqual(encoding.xpaths , lowerCAmelCase__ )
| 9 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
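
# Usage sketch, mirroring the docstring above (the `math_equivalence` module
# must be installed from the hendrycks/math repository first):
#   metric = datasets.load_metric("competition_math")
#   metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])  # {'accuracy': 1.0}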
| 9 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)
def __magic_name__( self :str , lowerCAmelCase__ :"Image" , lowerCAmelCase__ :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
__SCREAMING_SNAKE_CASE : Optional[Any] = task_prompt.replace('''{user_input}''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.pre_processor.tokenizer(
lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors='''pt''' ).input_ids
__SCREAMING_SNAKE_CASE : Optional[int] = self.pre_processor(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __magic_name__( self :str , lowerCAmelCase__ :Optional[Any] ) -> List[Any]:
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=lowerCAmelCase__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=lowerCAmelCase__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=lowerCAmelCase__ , ).sequences
def __magic_name__( self :int , lowerCAmelCase__ :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.pre_processor.batch_decode(lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Optional[int] = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
__SCREAMING_SNAKE_CASE : Tuple = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
__SCREAMING_SNAKE_CASE : int = re.sub(r'''<.*?>''' , '''''' , lowerCAmelCase__ , count=1 ).strip() # remove first task start token
__SCREAMING_SNAKE_CASE : Optional[int] = self.pre_processor.tokenajson(lowerCAmelCase__ )
return sequence["answer"]
| 9 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads, n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
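
# The classes above follow standard unittest discovery; a typical local run
# (the test path is illustrative) would be:
#   python -m pytest -k "OpenAIGPT" tests/models/openai/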
| 9 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
train_parser.add_argument('''--model_type''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=lowerCAmelCase__ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str,
        finetuning_task_name: str, *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
__SCREAMING_SNAKE_CASE : Dict = self._tf_checkpoint
__SCREAMING_SNAKE_CASE : int = ''''''
else:
__SCREAMING_SNAKE_CASE : int = self._tf_checkpoint
__SCREAMING_SNAKE_CASE : List[Any] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase__ , self._config , self._pytorch_dump_output , lowerCAmelCase__ )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
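
# Example invocation through the transformers CLI (paths are illustrative):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin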
| 9 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
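
# Usage sketch for `deprecate` (the argument names are illustrative): warn
# about a deprecated keyword and recover its value from the caller's kwargs.
#   scale = deprecate("scale", "9.9.9", "Use `guidance_scale` instead.", take_from=kwargs)
# Passing a version lower than the current package version raises ValueError,
# which is how stale deprecation shims are caught.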
| 9 | 1 |
# NOTE: helper names below are restored from their call sites; the top-level
# function is a three-point collinearity test (zero cross product of AB, AC).
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
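
# Sanity check with made-up points: three points on the line y = x are
# collinear (the cross product of AB and AC is the zero vector), while moving
# the third point off the line breaks collinearity.
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 0), (2, 2, 0)))  # True
    print(are_collinear((0, 0, 0), (1, 1, 0), (2, 3, 0)))  # False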
| 9 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 9 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode,
            verification_mode=verification_mode, base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    """Writes a dataset to an SQL database table, batch by batch."""

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        # `sql` and `con` are handled by this writer, so they must not reach pandas.DataFrame.to_sql
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args) -> int:
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database, batch by batch."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written
        return written
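# Minimal usage sketch (an assumption, not part of the original module: it must run
# where the relative imports above resolve; the table name and sqlite URI are illustrative):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
#   SqlDatasetWriter(ds, name="my_table", con="sqlite:///example.db").write()
#   ds2 = SqlDatasetReader("SELECT * FROM my_table", con="sqlite:///example.db").read()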
| 9 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 9 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
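# Hypothetical usage sketch (assumes a transformers install with the vision extras
# and a local PIL image; not part of the original file):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))  # PipelineTool.__call__ chains encode/forward/decode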
| 9 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
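# Usage sketch, following the module's own "os.path.join" example (the module name
# `mymodule` is an assumption for illustration):
#
#   import mymodule  # suppose it did `from os.path import join`
#   def mock_join(*parts):
#       return "/".join(parts)
#   with patch_submodule(mymodule, "os.path.join", mock_join):
#       ...  # inside this block, mymodule sees the mocked join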
| 9 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
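# With this pattern, `import transformers.models.timesformer` stays cheap: the
# torch-backed symbols listed in _import_structure are only imported when first
# accessed on the lazy module (sketch, not part of the original file):
#
#   from transformers.models.timesformer import TimesformerConfig  # triggers the lazy load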
| 9 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
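# This script exercises gather_for_metrics across processes, so it is typically
# started through the accelerate launcher rather than plain python, e.g. (sketch;
# the process count and file name are assumptions):
#
#   accelerate launch --num_processes 2 this_file.py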
| 9 | 1 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'PoolFormerConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'sail/poolformer_s12'
_EXPECTED_OUTPUT_SHAPE = [1, 5_1_2, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'sail/poolformer_s12'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'sail/poolformer_s12',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample, when applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct Patch Embeddings."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group; input is a tensor of shape [batch_size, channels, height, width]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Token mixing by average pooling; subtracting the input leaves only the "mixed" residual
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = 'poolformer'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
    'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    '''
    PoolFormer Model transformer with an image classification head on top
    ''',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
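# Minimal inference sketch (assumes a transformers install with Hub access; `image`
# stands in for any PIL image and is an assumption, the checkpoint matches
# _IMAGE_CLASS_CHECKPOINT above):
#
#   from transformers import AutoImageProcessor, PoolFormerForImageClassification
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   logits = model(**processor(images=image, return_tensors="pt")).logits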
| 9 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
            f' reinstalling {pkg}.'
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}'
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f'\n{hint}' if hint is not None else ''

    # non-versioned check
    if re.match(r'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                f' got {requirement}'
            )
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    f' but got {requirement}'
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f'{requirement}: need one of {list(ops.keys())}, but got {op}')

    # special case
    if pkg == 'python':
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
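# Usage sketch (the requirement strings below are illustrative, not taken from the file):
#
#   require_version("numpy")                          # presence-only check
#   require_version("torch>=1.9")                     # single version constraint
#   require_version_core("tokenizers>=0.11.1,!=0.11.3")  # multiple constraints, core hint on failure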
| 9 | 1 |
def solution(n: int = 1000) -> int:
    """
    Counts, among the first `n` continued-fraction expansions of sqrt(2),
    those fractions whose numerator has more digits than the denominator
    (Project Euler problem 57).
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
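# Cross-check of the recurrence (sketch): the convergents x of sqrt(2) satisfy
# x_{k+1} = 1 + 1/(1 + x_k), which is equivalent to the updates above
# (numerator' = numerator + 2*denominator, denominator' = numerator + denominator):
#
#   from fractions import Fraction
#   x = Fraction(3, 2)
#   for _ in range(5):
#       x = 1 + 1 / (1 + x)   # 3/2 -> 7/5 -> 17/12 -> ...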
| 9 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: Req = R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
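# Quick sanity examples (plain arithmetic, sketch):
#   resistor_parallel([2.0, 2.0]) == 1.0   # 1 / (1/2 + 1/2)
#   resistor_series([2.0, 3.0]) == 5.0     # 2 + 3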
| 9 | 1 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost insertion point for `item` that keeps `sorted_collection` sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost insertion point for `item` that keeps `sorted_collection` sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
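# Example behaviour (sketch):
#   binary_search([0, 5, 7, 10, 15], 6)  -> None   # absent
#   binary_search([0, 5, 7, 10, 15], 10) -> 3      # index of 10
#   insort_left(collection, 6) inserts 6 while keeping the list sorted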
| 9 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ['keras_nlp']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['keras_nlp'])
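# Behaviour sketch: these dummies mirror the public API so that importing the library
# without the optional `keras_nlp` backend still succeeds; instantiating the class then
# raises an ImportError from requires_backends telling the user to install keras_nlp.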
| 9 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VivitImageProcessor if is_vision_available() else None
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = VivitImageProcessingTester(self )
@property
def __magic_name__( self :int ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__SCREAMING_SNAKE_CASE : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :str ) -> int:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
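    # Shape convention checked by the three test_call_* methods above: pixel_values
    # come back as (batch_size, num_frames, num_channels, height, width), where
    # height/width equal the tester's crop_size (18x18 here) and the remaining
    # sizes are defined on VivitImageProcessingTester earlier in this file.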
| 9 | 1 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time , burst_time , no_of_processes ):
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain, any process whose arrival time has passed and that
    # still has remaining execution time is put into ready_process; the shortest
    # process in ready_process (target_process) is then executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time , no_of_processes , waiting_time ):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 9 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__(self , parent ):
        self.parent = parent
    def prepare_feat_extract_dict(self ):
        return {}
def get_html_strings():
    html_string_a = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_b = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_a, html_string_b]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
    def setUp(self ):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )
    @property
    def feature_extract_dict(self ):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string )
        # fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings )
        # fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
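# Standalone usage sketch (assumes bs4 is installed; behavior as exercised by
# the test above, shown on a deliberately tiny document):
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>hi</p></body></html>")
#   # encoding.nodes  -> [['hi']]
#   # encoding.xpaths -> [['/html/body/p']]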
| 9 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'Salesforce/codegen-350M-mono': 2_0_4_8,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        if kwargs.pop('''add_bos_token''' , False ):
            model_id = kwargs.pop('''name_or_path''' , '''''' )
            raise ValueError(
                '''Currently GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                f'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'''
                f'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'''
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def decode(self , token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , truncate_before_pattern: Optional[List[str]] = None , **kwargs , ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern )
        return decoded_text
    def truncate(self , completion , truncate_before_pattern ):
        def find_re(string , pattern , start_pos ):
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer('''^print''' , completion , re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('''^def''' , completion , re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion , terminal , start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
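# Usage sketch for truncate_before_pattern (checkpoint name taken from the maps
# above; the regex list mirrors the one suggested in the CodeGen docs and is
# illustrative, not required):
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
#
# Everything from the first regex match onward is dropped, in addition to the
# built-in cuts before a second top-level `print` or `def`.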
| 9 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self ):
        super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer(self , **kwargs ):
        kwargs['''lower_case'''] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = '''<unk> UNwanted , running'''
        output_text = '''<unk> unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('''<unk> UNwanted , running''' )
        self.assertListEqual(tokens , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower(self ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ?  ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
    def test_full_tokenizer_no_lower(self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ?  ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_full_tokenizer_moses_numbers(self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
        tokens_target = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_target )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_target ) , text_in )
    def test_move_added_token(self ):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(['''new1''', '''new2'''] )
        tokenizer.move_added_token('''new1''' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
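# Note on move_added_token (asserted above): the added token is re-indexed to
# the requested id rather than duplicated, so the vocabulary only grows by the
# two tokens that were explicitly added.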
| 9 | 1 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase ):
    '''simple docstring'''
    def test_sorted(self ):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight(self ):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_negative_weight_value(self ):
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
    def test_negative_profit_value(self ):
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
    def test_null_max_weight(self ):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_unequal_list_length(self ):
        self.assertRaisesRegex(
            IndexError , '''The length of profit and weight must be same.''' )
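# A minimal fractional-greedy sketch that reproduces the 210 expectation above
# (an assumption for illustration; the real knapsack.greedy_knapsack module may
# differ in details such as validation order and error messages):
def _reference_calc_profit(profit, weight, max_weight):
    # Take items in descending profit-per-weight order, splitting the last item.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if capacity <= 0:
            break
        take = min(w, capacity)
        total += p * (take / w)
        capacity -= take
    return total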
if __name__ == "__main__":
unittest.main()
| 9 |
def jaccard_similarity(set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
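    # Hand check: the intersection {c, d, e} has 3 elements and the union has 8,
    # so the call above prints 3 / 8 = 0.375. The list/tuple branch gives the
    # same value for these inputs:
    print(jaccard_similarity(['a', 'b', 'c', 'd', 'e'], ['c', 'd', 'e', 'f', 'h', 'i']))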
| 9 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''bert'''
    def __init__(self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
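# Minimal usage sketch for the config class above (paths are illustrative):
#
#   config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#   config.save_pretrained("./tiny-bert")        # writes ./tiny-bert/config.json
#   config = BertConfig.from_pretrained("./tiny-bert")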
| 9 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n    @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs , in_sentvecs ):
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , '''cosine''' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
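# Hand check for precision_at_10: if en_sentvecs equals in_sentvecs with distinct
# rows, each row's most similar in-language vector is itself, so every query is
# matched within the top 10 and the score is 1.0 (cf. the cvit-mkb-clsr example
# in _KWARGS_DESCRIPTION above).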
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric ):
'''simple docstring'''
    def _info(self ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self , predictions , references ):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions , references )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions , references )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n    is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n    reference is a string that contains natural language\n    and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n    (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric ):
    '''simple docstring'''
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
| 9 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds , text_embeds ):
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
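# Naming note: despite being called cosine_distance, this returns cosine
# *similarity* (normalized dot products in [-1, 1]); larger values mean an image
# embedding is closer to a concept embedding, which is why the checks below
# compare `concept_cos - concept_threshold` against 0.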
class StableDiffusionSafetyChecker(PreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']
    def __init__(self , config: CLIPConfig ):
        super().__init__(config )
        self.vision_model = CLIPVisionModel(config.vision_config )
        self.visual_projection = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False )
        self.concept_embeds = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=False )
        self.special_care_embeds = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=False )
        self.concept_embeds_weights = nn.Parameter(torch.ones(17 ) , requires_grad=False )
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3 ) , requires_grad=False )
    @torch.no_grad()
    def forward(self , clip_input , images ):
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds ).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds , self.concept_embeds ).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size ):
            result_img = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img['''special_scores'''][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img['''concept_scores'''][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx )
            result.append(result_img )
        has_nsfw_concepts = [len(res['''bad_concepts'''] ) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self , clip_input: torch.FloatTensor , images: torch.FloatTensor ):
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0 , dim=1 )
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
| 9 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root ):
    # Validation
    def is_valid_tree(node ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(root ):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''' )
    def is_binary_search_tree_recursive_check(
        node , left_bound , right_bound ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(root , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
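    # Quick manual checks using the TreeNode dataclass defined above:
    print(is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))))  # True
    print(is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))))  # False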
| 9 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a , b ):
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier(train_data , train_target , classes , point , k=5 ):
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
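    # The query point [4.4, 3.1, 1.3, 1.4] sits inside the Iris-setosa cluster,
    # so this prints 'setosa' for essentially any random train/test split.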
| 9 | 1 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
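# Meant to run on a schedule in CI with a token that may edit issues; the exact
# invocation below is illustrative, not a fixed interface:
#   GITHUB_TOKEN=<token with repo scope> python utils/stale.py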
| 9 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open(*args , **kwargs ):
            pass
    def load_image(_ ):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self , model , tokenizer , processor ):
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '''''' ) ) )
        question = '''What is the placebo?'''
        examples = [
            {
                '''image''': load_image(image ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self , dqa_pipeline , examples ):
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'''score''': ANY(float ), '''answer''': ANY(str ), '''start''': ANY(int ), '''end''': ANY(int )},
                    {'''score''': ANY(float ), '''answer''': ANY(str ), '''start''': ANY(int ), '''end''': ANY(int )},
                ]
            ]
            * 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self ):
        dqa_pipeline = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        image = INVOICE_URL
        question = '''How many cats are there?'''
        expected_output = [
            {'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        outputs = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )
        # We can optionally pass directly the words and bounding boxes
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self ):
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        outputs = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self ):
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        outputs = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self ):
        tokenizer = AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=True )
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=tokenizer , revision='''3dc6de3''' , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        outputs = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '''''' ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self ):
        tokenizer = AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=True )
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=tokenizer , revision='''3dc6de3''' , max_seq_len=50 , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '''''' ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
@slow
@require_torch
    def test_large_model_pt_donut(self ):
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self ):
        pass
| 9 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swin'] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_swin'] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
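# The _LazyModule indirection above keeps importing the package cheap: the
# torch/TensorFlow submodules are only imported when a name registered in
# _import_structure is first accessed, e.g.
#
#   from transformers import SwinConfig   # no torch import yet
#   from transformers import SwinModel    # now the torch-backed module loads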
| 9 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']
def __init__( self :Any , lowerCAmelCase__ :Union[str, Any]=65_024 , lowerCAmelCase__ :Optional[Any]=4_544 , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Union[str, Any]=71 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Optional[int]=0.02 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :Tuple=11 , lowerCAmelCase__ :List[Any]=11 , **lowerCAmelCase__ :Union[str, Any] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
# Backward compatibility with n_embed kwarg
__SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''n_embed''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = hidden_size if n_embed is None else n_embed
__SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : str = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[Any] = use_cache
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout
__SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
__SCREAMING_SNAKE_CASE : List[Any] = bos_token_id
__SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads if num_kv_heads is None else num_kv_heads
__SCREAMING_SNAKE_CASE : Dict = alibi
__SCREAMING_SNAKE_CASE : Any = new_decoder_architecture
__SCREAMING_SNAKE_CASE : Union[str, Any] = multi_query # Ignored when new_decoder_architecture is True
__SCREAMING_SNAKE_CASE : Dict = parallel_attn
__SCREAMING_SNAKE_CASE : List[str] = bias
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__( self :Any ) -> Dict:
return self.hidden_size // self.num_attention_heads
@property
def __magic_name__( self :Tuple ) -> List[str]:
return not self.alibi
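# Usage sketch (illustrative values): FalconConfig(num_kv_heads=8, new_decoder_architecture=True)
# gives head_dim == 4544 // 71 == 64 under the defaults above, and rotary == True,
# since rotary position embeddings apply whenever alibi is disabled.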
| 9 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
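    # In short: the streaming branch lazily wraps the raw text files, while the
    # map-style branch first runs download_and_prepare() to build an Arrow dataset
    # and then loads it, optionally fully in memory.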
| 9 | 1 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
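# Usage sketch (illustrative values, not part of the original module):
#   foods = build_menu(['Burger', 'Pizza', 'Coca Cola'], [80, 100, 60], [40, 10, 20])
#   trunk, total = greedy(foods, 60, Things.get_value)
# Sorting by value descending packs Pizza (cost 10) and Burger (cost 40, total 50),
# skips the Coca Cola that would exceed max_cost=60, and returns total == 180.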
def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
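# Note on the concatenation above: the original BLIP-2 attention stores separate q
# and v biases and no k bias, so the fused qkv bias is laid out as
# [q_bias, zeros_like(v_bias), v_bias].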
def get_blipa_config(model_name, eos_token_id=None):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
__SCREAMING_SNAKE_CASE : Any = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__SCREAMING_SNAKE_CASE : str = tokenizer('''\n''' , add_special_tokens=lowercase__ ).input_ids[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = get_blipa_config(lowercase__ , eos_token_id=lowercase__ )
__SCREAMING_SNAKE_CASE : int = BlipaForConditionalGeneration(lowercase__ ).eval()
__SCREAMING_SNAKE_CASE : int = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__SCREAMING_SNAKE_CASE : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = load_model_and_preprocess(
name=lowercase__ , model_type=lowercase__ , is_eval=lowercase__ , device=lowercase__ )
original_model.eval()
print('''Done!''' )
# update state dict keys
__SCREAMING_SNAKE_CASE : List[str] = original_model.state_dict()
__SCREAMING_SNAKE_CASE : Optional[int] = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(lowercase__ )
if key.startswith('''Qformer.bert''' ):
__SCREAMING_SNAKE_CASE : List[str] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE : Dict = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__SCREAMING_SNAKE_CASE : List[str] = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__SCREAMING_SNAKE_CASE : Tuple = key.replace('''t5''' , '''language''' )
__SCREAMING_SNAKE_CASE : Tuple = val
# read in qv biases
read_in_q_v_bias(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = hf_model.load_state_dict(lowercase__ , strict=lowercase__ )
assert len(lowercase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE : List[str] = load_demo_image()
__SCREAMING_SNAKE_CASE : Any = vis_processors['''eval'''](lowercase__ ).unsqueeze(0 ).to(lowercase__ )
__SCREAMING_SNAKE_CASE : str = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(lowercase__ )
# create processor
__SCREAMING_SNAKE_CASE : List[Any] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=lowercase__ , image_std=lowercase__ )
__SCREAMING_SNAKE_CASE : int = BlipaProcessor(image_processor=lowercase__ , tokenizer=lowercase__ )
__SCREAMING_SNAKE_CASE : Any = processor(images=lowercase__ , return_tensors='''pt''' ).pixel_values.to(lowercase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase__ , lowercase__ )
original_model.to(lowercase__ )
hf_model.to(lowercase__ )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE : Dict = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__SCREAMING_SNAKE_CASE : Dict = hf_model(lowercase__ , lowercase__ ).logits
else:
__SCREAMING_SNAKE_CASE : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__SCREAMING_SNAKE_CASE : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__SCREAMING_SNAKE_CASE : Optional[int] = hf_model(lowercase__ , lowercase__ , labels=lowercase__ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowercase__ )
assert torch.allclose(logits[0, :3, :3] , lowercase__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowercase__ )
else:
# cast to same type
__SCREAMING_SNAKE_CASE : Optional[Any] = logits.dtype
assert torch.allclose(original_logits.to(lowercase__ ) , lowercase__ , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__SCREAMING_SNAKE_CASE : Any = ''''''
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowercase__ , return_tensors='''pt''' ).input_ids.to(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = original_model.generate({'''image''': original_pixel_values} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = hf_model.generate(
lowercase__ , lowercase__ , do_sample=lowercase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.shape[1]
__SCREAMING_SNAKE_CASE : Any = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = [text.strip() for text in output_text]
print('''HF generation:''' , lowercase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 | 1 |
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
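# Worked check of the formula: for n = 1 the central binomial coefficient is
# C(2, 1) = 2 and for n = 2 it is C(4, 2) = 6, matching the lattice-path counts of
# Project Euler problem 15; solution(20) evaluates to 137846528820.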
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 9 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2_048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
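    # Note (illustrative numbers): if the encoded conversation holds 2100 ids while
    # model_max_length is 2048, the slice above keeps only the newest 2048 ids,
    # silently dropping the oldest turns.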
| 9 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
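    # Example mirroring the docstring above: predictions=["1/2"] with
    # references=["\\frac{1}{2}"] canonicalize to the same expression, so the metric
    # returns {'accuracy': 1.0}.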
| 9 | 1 |
import random
def random_graph(vertices_number, probability, directed=False):
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number):
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
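# Illustrative calls (output varies with the random seed): random_graph(4, 0.5)
# might return {0: [1, 3], 1: [0], 2: [3], 3: [0, 2]}, while any probability >= 1
# always yields complete_graph(4) = {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}.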
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
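        # Note: with do_sample=False the generate() call above decodes greedily, which
        # is what makes the exact token-id continuation of "the president is"
        # reproducible across runs.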
| 9 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 9 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
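# Usage sketch (hypothetical caller):
#   scale = deprecate('scale', '1.0.0', 'use `guidance_scale` instead', take_from=kwargs)
# pops `scale` from kwargs, emits a FutureWarning, and returns the old value so the
# caller can keep honoring it until the stated removal version.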
| 9 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCAmelCase : str =logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
'''simple docstring'''
    model_input_names = ['input_features', 'is_longer']
def __init__( self :Union[str, Any] , lowerCAmelCase__ :int=64 , lowerCAmelCase__ :Tuple=48_000 , lowerCAmelCase__ :List[str]=480 , lowerCAmelCase__ :Optional[Any]=10 , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :float = 0 , lowerCAmelCase__ :float = 14_000 , lowerCAmelCase__ :int = None , lowerCAmelCase__ :str = "fusion" , lowerCAmelCase__ :str = "repeatpad" , **lowerCAmelCase__ :List[Any] , ) -> Tuple:
super().__init__(
feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[Any] = top_db
__SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
__SCREAMING_SNAKE_CASE : List[Any] = padding
__SCREAMING_SNAKE_CASE : Any = fft_window_size
__SCREAMING_SNAKE_CASE : List[Any] = (fft_window_size >> 1) + 1
__SCREAMING_SNAKE_CASE : Dict = hop_length
__SCREAMING_SNAKE_CASE : List[Any] = max_length_s
__SCREAMING_SNAKE_CASE : Optional[Any] = max_length_s * sampling_rate
__SCREAMING_SNAKE_CASE : Any = sampling_rate
__SCREAMING_SNAKE_CASE : Tuple = frequency_min
__SCREAMING_SNAKE_CASE : Optional[int] = frequency_max
__SCREAMING_SNAKE_CASE : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCAmelCase__ , min_frequency=lowerCAmelCase__ , max_frequency=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , norm=lowerCAmelCase__ , mel_scale='''htk''' , )
__SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCAmelCase__ , min_frequency=lowerCAmelCase__ , max_frequency=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __magic_name__( self :Any ) -> Dict[str, Any]:
__SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :np.array , lowerCAmelCase__ :Optional[np.array] = None ) -> np.ndarray:
__SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram(
lowerCAmelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCAmelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __magic_name__( self :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__SCREAMING_SNAKE_CASE : Any = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__SCREAMING_SNAKE_CASE : Tuple = [0]
# randomly choose index for each part
__SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[0] )
__SCREAMING_SNAKE_CASE : Dict = np.random.choice(ranges[1] )
__SCREAMING_SNAKE_CASE : Tuple = np.random.choice(ranges[2] )
__SCREAMING_SNAKE_CASE : List[str] = mel[idx_front : idx_front + chunk_frames, :]
__SCREAMING_SNAKE_CASE : Tuple = mel[idx_middle : idx_middle + chunk_frames, :]
__SCREAMING_SNAKE_CASE : List[str] = mel[idx_back : idx_back + chunk_frames, :]
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(mel[None, None, :] )
__SCREAMING_SNAKE_CASE : Tuple = torch.nn.functional.interpolate(
lowerCAmelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = mel_shrink[0][0].numpy()
__SCREAMING_SNAKE_CASE : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
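    # Descriptive note: the fusion above stacks four views of the clip (a bilinearly
    # shrunk copy of the full mel spectrogram plus one random chunk from each of the
    # front, middle and back thirds), giving the model both global and local context.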
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :np.array , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__SCREAMING_SNAKE_CASE : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__SCREAMING_SNAKE_CASE : List[Any] = len(lowerCAmelCase__ ) - max_length
__SCREAMING_SNAKE_CASE : int = np.random.randint(0 , overflow + 1 )
__SCREAMING_SNAKE_CASE : int = waveform[idx : idx + max_length]
__SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(lowerCAmelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._np_extract_fbank_features(lowerCAmelCase__ , self.mel_filters )
__SCREAMING_SNAKE_CASE : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__SCREAMING_SNAKE_CASE : str = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__SCREAMING_SNAKE_CASE : Dict = np.stack([mel, mel, mel, mel] , axis=0 )
__SCREAMING_SNAKE_CASE : int = False
else:
__SCREAMING_SNAKE_CASE : List[Any] = self._random_mel_fusion(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
__SCREAMING_SNAKE_CASE : List[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__SCREAMING_SNAKE_CASE : Optional[int] = int(max_length / len(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(lowerCAmelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = np.stack(np.tile(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : str = np.pad(lowerCAmelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
__SCREAMING_SNAKE_CASE : Optional[Any] = self._np_extract_fbank_features(lowerCAmelCase__ , self.mel_filters )
__SCREAMING_SNAKE_CASE : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__SCREAMING_SNAKE_CASE : Dict = self._np_extract_fbank_features(lowerCAmelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self :Tuple , lowerCAmelCase__ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase__ :str = None , lowerCAmelCase__ :Optional[str] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ :Dict , ) -> BatchFeature:
__SCREAMING_SNAKE_CASE : Tuple = truncation if truncation is not None else self.truncation
__SCREAMING_SNAKE_CASE : Dict = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE : List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
__SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(lowerCAmelCase__ , max_length if max_length else self.nb_max_samples , lowerCAmelCase__ , lowerCAmelCase__ )
for waveform in raw_speech
]
__SCREAMING_SNAKE_CASE : Dict = []
__SCREAMING_SNAKE_CASE : Tuple = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__SCREAMING_SNAKE_CASE : int = np.random.randint(0 , len(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : str = True
if isinstance(input_mel[0] , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__SCREAMING_SNAKE_CASE : str = [[longer] for longer in is_longer]
__SCREAMING_SNAKE_CASE : Dict = {'''input_features''': input_mel, '''is_longer''': is_longer}
__SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
| 9 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
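# Usage sketch: with collection = [0, 5, 7, 10, 15] and target = 7, binary_search
# returns index 2; bisect_left also returns 2 while bisect_right returns 3, the
# insertion point after the existing 7.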
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 9 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
def __magic_name__( self :Tuple ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VivitImageProcessor if is_vision_available() else None
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = VivitImageProcessingTester(self )
@property
def __magic_name__( self :int ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__SCREAMING_SNAKE_CASE : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :str ) -> int:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :Any ) -> List[str]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : Tuple = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 9 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = '''ylacombe/bark-small'''
__SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : str = '''en_speaker_1'''
__SCREAMING_SNAKE_CASE : Any = '''This is a test string'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings_path.json'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings'''
def __magic_name__( self :List[str] , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> int:
shutil.rmtree(self.tmpdirname )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__( self :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__SCREAMING_SNAKE_CASE : str = 35
__SCREAMING_SNAKE_CASE : str = 2
__SCREAMING_SNAKE_CASE : List[Any] = 8
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''semantic_prompt''': np.ones(lowerCAmelCase__ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__( self :Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 9 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''bit'''
SCREAMING_SNAKE_CASE__ : Tuple = ['''preactivation''', '''bottleneck''']
SCREAMING_SNAKE_CASE__ : List[str] = ['''SAME''', '''VALID''']
def __init__( self :str , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :str=[256, 512, 1_024, 2_048] , lowerCAmelCase__ :List[str]=[3, 4, 6, 3] , lowerCAmelCase__ :str="preactivation" , lowerCAmelCase__ :str="relu" , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Union[str, Any]=32 , lowerCAmelCase__ :List[Any]=1 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Optional[int]=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**lowerCAmelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__SCREAMING_SNAKE_CASE : Tuple = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
__SCREAMING_SNAKE_CASE : Optional[int] = num_channels
__SCREAMING_SNAKE_CASE : str = embedding_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes
__SCREAMING_SNAKE_CASE : Union[str, Any] = depths
__SCREAMING_SNAKE_CASE : Tuple = layer_type
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[int] = global_padding
__SCREAMING_SNAKE_CASE : Optional[int] = num_groups
__SCREAMING_SNAKE_CASE : List[Any] = drop_path_rate
__SCREAMING_SNAKE_CASE : Tuple = embedding_dynamic_padding
__SCREAMING_SNAKE_CASE : Union[str, Any] = output_stride
__SCREAMING_SNAKE_CASE : List[str] = width_factor
__SCREAMING_SNAKE_CASE : List[Any] = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase__ ) + 1 )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
| 9 |
from importlib import import_module
from .logging import get_logger
__lowerCAmelCase : str =get_logger(__name__)
class _lowercase :
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str=None ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
def __init__( self :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict=None ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = obj
__SCREAMING_SNAKE_CASE : str = target
__SCREAMING_SNAKE_CASE : Dict = new
__SCREAMING_SNAKE_CASE : Union[str, Any] = target.split('''.''' )[0]
__SCREAMING_SNAKE_CASE : List[str] = {}
__SCREAMING_SNAKE_CASE : Tuple = attrs or []
def __enter__( self :int ) -> Dict:
*__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase__ ) ):
try:
__SCREAMING_SNAKE_CASE : Any = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.obj , lowerCAmelCase__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__SCREAMING_SNAKE_CASE : int = obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) )
__SCREAMING_SNAKE_CASE : List[str] = getattr(self.obj , lowerCAmelCase__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) )
__SCREAMING_SNAKE_CASE : Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
# finally set the target attribute
setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(import_module('''.'''.join(lowerCAmelCase__ ) ) , lowerCAmelCase__ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCAmelCase__ ) is attr_value:
__SCREAMING_SNAKE_CASE : Any = getattr(self.obj , lowerCAmelCase__ )
setattr(self.obj , lowerCAmelCase__ , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
__SCREAMING_SNAKE_CASE : Union[str, Any] = globals()['''__builtins__'''][target_attr]
setattr(self.obj , lowerCAmelCase__ , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self :str , *lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
for attr in list(self.original ):
setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) )
def __magic_name__( self :List[Any] ) -> List[Any]:
self.__enter__()
self._active_patches.append(self )
def __magic_name__( self :Optional[int] ) -> int:
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 9 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def _UpperCamelCase ( lowercase__ ):
# getting number of pixels in the image
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(lowercase__ ):
for j in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : int = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__lowerCAmelCase : Optional[Any] =imread('image_data/lena.jpg', 1)
# convert to its negative
__lowerCAmelCase : Union[str, Any] =convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 9 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] ='true'
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=16 ):
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = RegressionModel()
__SCREAMING_SNAKE_CASE : Optional[int] = deepcopy(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = RegressionDataset(length=lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def _UpperCamelCase ( lowercase__ , lowercase__=False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__SCREAMING_SNAKE_CASE : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE : Tuple = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase__ ):
if use_longest:
return tokenizer.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowercase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = get_dataloader(lowercase__ , not dispatch_batches )
__SCREAMING_SNAKE_CASE : List[str] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(lowercase__ , lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = []
for batch in dataloader:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = batch.values()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Dict = model(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase__ )
targs.append(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=False , lowercase__=False , lowercase__=16 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = get_basic_setup(lowercase__ , lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = generate_predictions(lowercase__ , lowercase__ , lowercase__ )
assert (
len(lowercase__ ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}'''
def _UpperCamelCase ( lowercase__ = False , lowercase__ = False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_mrpc_setup(lowercase__ , lowercase__ )
# First do baseline
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = setup['''no''']
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : Dict = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ , references=batch['''labels'''] )
__SCREAMING_SNAKE_CASE : int = metric.compute()
# Then do distributed
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : int = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE : Any = batch['''labels''']
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ , references=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Dict = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowercase__ , lowercase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__SCREAMING_SNAKE_CASE : List[Any] = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowercase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__SCREAMING_SNAKE_CASE : Tuple = Accelerator()
test_torch_metrics(lowercase__ , 512 )
accelerator.state._reset_state()
def _UpperCamelCase ( lowercase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__lowerCAmelCase : List[Any] ='\\n Text data.\n Second line of data.'
__lowerCAmelCase : Tuple ='file'
@pytest.fixture(scope='''session''' )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = bytes(lowercase__ , '''utf-8''' )
with zstd.open(lowercase__ , '''wb''' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture
def _UpperCamelCase ( lowercase__ ):
with open(os.path.join(tmpfs.local_root_dir , lowercase__ ) , '''w''' ) as f:
f.write(lowercase__ )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__SCREAMING_SNAKE_CASE : str = input_paths[compression_format]
__SCREAMING_SNAKE_CASE : Tuple = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : Any = DownloadConfig(cache_dir=lowercase__ , extract_compressed_file=lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = cached_path(lowercase__ , download_config=lowercase__ )
with open(lowercase__ ) as f:
__SCREAMING_SNAKE_CASE : Any = f.read()
with open(lowercase__ ) as f:
__SCREAMING_SNAKE_CASE : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = '''custom_cache'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''custom_extracted_dir'''
__SCREAMING_SNAKE_CASE : int = tmp_path / '''custom_extracted_path'''
if default_extracted:
__SCREAMING_SNAKE_CASE : List[Any] = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , lowercase__ )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase__ ) )
__SCREAMING_SNAKE_CASE : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__SCREAMING_SNAKE_CASE : int = xz_file
__SCREAMING_SNAKE_CASE : int = (
DownloadConfig(extract_compressed_file=lowercase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase__ )
)
__SCREAMING_SNAKE_CASE : Optional[int] = cached_path(lowercase__ , download_config=lowercase__ )
assert Path(lowercase__ ).parent.parts[-2:] == expected
def _UpperCamelCase ( lowercase__ ):
# absolute path
__SCREAMING_SNAKE_CASE : Dict = str(Path(lowercase__ ).resolve() )
assert cached_path(lowercase__ ) == text_file
# relative path
__SCREAMING_SNAKE_CASE : str = str(Path(lowercase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase__ ) == text_file
def _UpperCamelCase ( lowercase__ ):
# absolute path
__SCREAMING_SNAKE_CASE : int = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(lowercase__ ):
cached_path(lowercase__ )
# relative path
__SCREAMING_SNAKE_CASE : Dict = '''./__missing_file__.txt'''
with pytest.raises(lowercase__ ):
cached_path(lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowercase__ ) as f:
__SCREAMING_SNAKE_CASE : Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase__ )
def _UpperCamelCase ( ):
with pytest.raises(lowercase__ ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase__ ):
http_get('''https://huggingface.co''' , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase__ ):
ftp_get('''ftp://huggingface.co''' , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : str = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase__ ):
fsspec_get('''s3://huggingface.co''' , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
fsspec_head('''s3://huggingface.co''' )
| 9 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__lowerCAmelCase : Union[str, Any] ={
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowercase__ ) , version.parse(lowercase__ ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def _UpperCamelCase ( lowercase__ , lowercase__ = None ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''' , lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = requirement, None, None
else:
__SCREAMING_SNAKE_CASE : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowercase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
F''' got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = match[0]
__SCREAMING_SNAKE_CASE : Optional[int] = want_full.split(''',''' ) # there could be multiple requirements
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for w in want_range:
__SCREAMING_SNAKE_CASE : Any = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , lowercase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
F''' but got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = match[0]
__SCREAMING_SNAKE_CASE : List[Any] = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__SCREAMING_SNAKE_CASE : Optional[Any] = '''.'''.join([str(lowercase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return
# check if any version is installed
try:
__SCREAMING_SNAKE_CASE : Optional[int] = importlib.metadata.version(lowercase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowercase__ , lowercase__ )
| 9 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : Union[str, Any] ={
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] =[
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__lowerCAmelCase : List[str] =[
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__lowerCAmelCase : str =[
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__lowerCAmelCase : str =[
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 |
from __future__ import annotations
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = 0.00
__SCREAMING_SNAKE_CASE : List[str] = 0
for resistor in resistors:
if resistor <= 0:
__SCREAMING_SNAKE_CASE : Any = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = 0.00
__SCREAMING_SNAKE_CASE : int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__SCREAMING_SNAKE_CASE : Tuple = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase ( *lowercase__ , lowercase__ = None , lowercase__=True , lowercase__=2 ):
from .. import __version__
__SCREAMING_SNAKE_CASE : Optional[Any] = take_from
__SCREAMING_SNAKE_CASE : List[str] = ()
if not isinstance(args[0] , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase__ ).base_version ) >= version.parse(lowercase__ ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if isinstance(lowercase__ , lowercase__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase__ ),)
__SCREAMING_SNAKE_CASE : List[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(lowercase__ , lowercase__ ):
values += (getattr(lowercase__ , lowercase__ ),)
__SCREAMING_SNAKE_CASE : List[str] = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__SCREAMING_SNAKE_CASE : str = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__SCREAMING_SNAKE_CASE : Any = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , lowercase__ , stacklevel=lowercase__ )
if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
__SCREAMING_SNAKE_CASE : Dict = call_frame.filename
__SCREAMING_SNAKE_CASE : Optional[Any] = call_frame.lineno
__SCREAMING_SNAKE_CASE : int = call_frame.function
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(lowercase__ ) == 0:
return
elif len(lowercase__ ) == 1:
return values[0]
return values
| 9 |
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''keras_nlp''']
def __init__( self :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Dict:
requires_backends(self , ['''keras_nlp'''] )
| 9 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=True , lowercase__="pt" ):
__SCREAMING_SNAKE_CASE : List[str] = {'''add_prefix_space''': True} if isinstance(lowercase__ , lowercase__ ) and not line.startswith(''' ''' ) else {}
__SCREAMING_SNAKE_CASE : str = padding_side
return tokenizer(
[line] , max_length=lowercase__ , padding='''max_length''' if pad_to_max_length else None , truncation=lowercase__ , return_tensors=lowercase__ , add_special_tokens=lowercase__ , **lowercase__ , )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__=None , ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.ne(lowercase__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any]="train" , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[Any]="" , ) -> str:
super().__init__()
__SCREAMING_SNAKE_CASE : Dict = Path(lowerCAmelCase__ ).joinpath(type_path + '''.source''' )
__SCREAMING_SNAKE_CASE : Dict = Path(lowerCAmelCase__ ).joinpath(type_path + '''.target''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_char_lens(self.src_file )
__SCREAMING_SNAKE_CASE : int = max_source_length
__SCREAMING_SNAKE_CASE : Dict = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
__SCREAMING_SNAKE_CASE : Any = tokenizer
__SCREAMING_SNAKE_CASE : Dict = prefix
if n_obs is not None:
__SCREAMING_SNAKE_CASE : List[str] = self.src_lens[:n_obs]
__SCREAMING_SNAKE_CASE : Dict = src_lang
__SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang
def __len__( self :List[Any] ) -> List[str]:
return len(self.src_lens )
def __getitem__( self :Union[str, Any] , lowerCAmelCase__ :str ) -> Dict[str, torch.Tensor]:
__SCREAMING_SNAKE_CASE : Any = index + 1 # linecache starts at 1
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prefix + linecache.getline(str(self.src_file ) , lowerCAmelCase__ ).rstrip('''\n''' )
__SCREAMING_SNAKE_CASE : str = linecache.getline(str(self.tgt_file ) , lowerCAmelCase__ ).rstrip('''\n''' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCAmelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__SCREAMING_SNAKE_CASE : List[Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
)
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.generator if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_source_length , '''right''' )
__SCREAMING_SNAKE_CASE : str = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_target_length , '''right''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = source_inputs['''input_ids'''].squeeze()
__SCREAMING_SNAKE_CASE : List[str] = target_inputs['''input_ids'''].squeeze()
__SCREAMING_SNAKE_CASE : Tuple = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __magic_name__( lowerCAmelCase__ :List[Any] ) -> Optional[int]:
return [len(lowerCAmelCase__ ) for x in Path(lowerCAmelCase__ ).open().readlines()]
def __magic_name__( self :Any , lowerCAmelCase__ :Tuple ) -> Dict[str, torch.Tensor]:
__SCREAMING_SNAKE_CASE : int = torch.stack([x['''input_ids'''] for x in batch] )
__SCREAMING_SNAKE_CASE : Tuple = torch.stack([x['''attention_mask'''] for x in batch] )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.stack([x['''decoder_input_ids'''] for x in batch] )
__SCREAMING_SNAKE_CASE : List[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
__SCREAMING_SNAKE_CASE : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
__SCREAMING_SNAKE_CASE : Any = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
__lowerCAmelCase : Optional[int] =getLogger(__name__)
def _UpperCamelCase ( lowercase__ ):
return list(itertools.chain.from_iterable(lowercase__ ) )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = get_git_info()
save_json(lowercase__ , os.path.join(lowercase__ , '''git_log.json''' ) )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__=4 , **lowercase__ ):
with open(lowercase__ , '''w''' ) as f:
json.dump(lowercase__ , lowercase__ , indent=lowercase__ , **lowercase__ )
def _UpperCamelCase ( lowercase__ ):
with open(lowercase__ ) as f:
return json.load(lowercase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = git.Repo(search_parent_directories=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = {
'''repo_id''': str(lowercase__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return list(map(lowercase__ , lowercase__ ) )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
with open(lowercase__ , '''wb''' ) as f:
return pickle.dump(lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
def remove_articles(lowercase__ ):
return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , lowercase__ )
def white_space_fix(lowercase__ ):
return " ".join(text.split() )
def remove_punc(lowercase__ ):
__SCREAMING_SNAKE_CASE : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase__ ) ) ) )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = normalize_answer(lowercase__ ).split()
__SCREAMING_SNAKE_CASE : Optional[int] = normalize_answer(lowercase__ ).split()
__SCREAMING_SNAKE_CASE : Union[str, Any] = Counter(lowercase__ ) & Counter(lowercase__ )
__SCREAMING_SNAKE_CASE : str = sum(common.values() )
if num_same == 0:
return 0
__SCREAMING_SNAKE_CASE : Tuple = 1.0 * num_same / len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = 1.0 * num_same / len(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = (2 * precision * recall) / (precision + recall)
return fa
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return normalize_answer(lowercase__ ) == normalize_answer(lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert len(lowercase__ ) == len(lowercase__ )
__SCREAMING_SNAKE_CASE : str = 0
for hypo, pred in zip(lowercase__ , lowercase__ ):
em += exact_match_score(lowercase__ , lowercase__ )
if len(lowercase__ ) > 0:
em /= len(lowercase__ )
return {"em": em}
def _UpperCamelCase ( lowercase__ ):
return model_prefix.startswith('''rag''' )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__SCREAMING_SNAKE_CASE : List[str] = '''dropout_rate'''
for p in extra_params:
if getattr(lowercase__ , lowercase__ , lowercase__ ):
if not hasattr(lowercase__ , lowercase__ ) and not hasattr(lowercase__ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(lowercase__ ) )
delattr(lowercase__ , lowercase__ )
continue
__SCREAMING_SNAKE_CASE : int = p if hasattr(lowercase__ , lowercase__ ) else equivalent_param[p]
setattr(lowercase__ , lowercase__ , getattr(lowercase__ , lowercase__ ) )
delattr(lowercase__ , lowercase__ )
return hparams, config
| 9 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=7 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :Optional[int]=18 , lowerCAmelCase__ :Dict=30 , lowerCAmelCase__ :Tuple=400 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[Any]=None , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Dict = size if size is not None else {'''shortest_edge''': 18}
__SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : List[Any] = batch_size
__SCREAMING_SNAKE_CASE : List[str] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_frames
__SCREAMING_SNAKE_CASE : Tuple = image_size
__SCREAMING_SNAKE_CASE : Optional[Any] = min_resolution
__SCREAMING_SNAKE_CASE : Any = max_resolution
__SCREAMING_SNAKE_CASE : List[Any] = do_resize
__SCREAMING_SNAKE_CASE : Optional[Any] = size
__SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
__SCREAMING_SNAKE_CASE : List[Any] = image_mean
__SCREAMING_SNAKE_CASE : List[str] = image_std
__SCREAMING_SNAKE_CASE : str = crop_size
def __magic_name__( self :Tuple ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VivitImageProcessor if is_vision_available() else None
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = VivitImageProcessingTester(self )
@property
def __magic_name__( self :int ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__SCREAMING_SNAKE_CASE : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :str ) -> int:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :Any ) -> List[str]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : Tuple = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 9 | 1 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None  # bind the probed name so later feature checks don't raise NameError
try:
    import msvcrt
except ImportError:
    msvcrt = None  # Windows-only; absent on POSIX
try:
    import fcntl
except ImportError:
    fcntl = None  # POSIX-only; absent on Windows
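# Sketch of the intent (editor's note, inferred from ``__all__`` below): which
# optional module imported above is available drives the platform lock class
# selected at the bottom of the full module -- msvcrt -> WindowsFileLock,
# fcntl -> UnixFileLock, neither -> SoftFileLock.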
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError  # Python 2 fallback: rebind the missing builtin
# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]
__version__ = '3.0.12'
_logger = None  # module-level cache read through ``global _logger`` below
def logger():
    '''Return (and lazily create) the module logger used by the debug calls below.'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
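# Minimal sketch (editor's addition, not part of the original module) of how a
# caller can surface the acquire/release debug messages emitted further down:
#
#   logging.basicConfig(level=logging.DEBUG)
#   logger().debug('filelock debug logging enabled')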
class Timeout(TimeoutError):
    '''Raised when the lock could not be acquired within *timeout* seconds.'''
    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None
    def __str__(self):
        temp = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
        return temp
class _lowercase :
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = lock
return None
def __enter__( self :List[str] ) -> int:
return self.lock
def __exit__( self :Union[str, Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> str:
self.lock.release()
return None
class _lowercase :
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Any=-1 , lowerCAmelCase__ :List[Any]=None ) -> Dict:
__SCREAMING_SNAKE_CASE : List[Any] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__SCREAMING_SNAKE_CASE : Optional[int] = self.hash_filename_if_too_long(lowerCAmelCase__ , lowerCAmelCase__ )
# The path to the lock file.
__SCREAMING_SNAKE_CASE : Tuple = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__SCREAMING_SNAKE_CASE : Any = None
# The default timeout value.
__SCREAMING_SNAKE_CASE : Union[str, Any] = timeout
# We use this lock primarily for the lock counter.
__SCREAMING_SNAKE_CASE : Any = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__SCREAMING_SNAKE_CASE : Any = 0
return None
@property
def __magic_name__( self :Union[str, Any] ) -> Dict:
return self._lock_file
@property
def __magic_name__( self :Union[str, Any] ) -> int:
return self._timeout
@timeout.setter
def __magic_name__( self :Any , lowerCAmelCase__ :str ) -> Any:
__SCREAMING_SNAKE_CASE : int = float(lowerCAmelCase__ )
return None
def __magic_name__( self :List[Any] ) -> List[str]:
raise NotImplementedError()
def __magic_name__( self :int ) -> int:
raise NotImplementedError()
@property
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
return self._lock_file_fd is not None
def __magic_name__( self :str , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :int=0.05 ) -> int:
# Use the default timeout, if no timeout is provided.
if timeout is None:
__SCREAMING_SNAKE_CASE : Tuple = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__SCREAMING_SNAKE_CASE : Tuple = id(self )
__SCREAMING_SNAKE_CASE : Dict = self._lock_file
__SCREAMING_SNAKE_CASE : int = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(lowerCAmelCase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__SCREAMING_SNAKE_CASE : Union[str, Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __magic_name__( self :Dict , lowerCAmelCase__ :Any=False ) -> Tuple:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__SCREAMING_SNAKE_CASE : Any = id(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file
logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
__SCREAMING_SNAKE_CASE : Any = 0
logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self :Tuple ) -> List[Any]:
self.acquire()
return self
def __exit__( self :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] ) -> int:
self.release()
return None
def __del__( self :Union[str, Any] ) -> Any:
self.release(force=lowerCAmelCase__ )
return None
def __magic_name__( self :int , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : str = os.path.basename(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > max_length and max_length > 0:
__SCREAMING_SNAKE_CASE : str = os.path.dirname(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = str(hash(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : List[str] = filename[: max_length - len(lowerCAmelCase__ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
else:
return path
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :Optional[int]=None ) -> List[Any]:
from .file_utils import relative_to_absolute_path
super().__init__(lowerCAmelCase__ , timeout=lowerCAmelCase__ , max_filename_length=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def __magic_name__( self :Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE : Dict = os.open(self._lock_file , lowerCAmelCase__ )
except OSError:
pass
else:
try:
msvcrt.locking(lowerCAmelCase__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : List[str] = fd
return None
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self._lock_file_fd
__SCREAMING_SNAKE_CASE : int = None
msvcrt.locking(lowerCAmelCase__ , msvcrt.LK_UNLCK , 1 )
os.close(lowerCAmelCase__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str]=-1 , lowerCAmelCase__ :Optional[Any]=None ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(lowerCAmelCase__ ) ).f_namemax
super().__init__(lowerCAmelCase__ , timeout=lowerCAmelCase__ , max_filename_length=lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.open(self._lock_file , lowerCAmelCase__ )
try:
fcntl.flock(lowerCAmelCase__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : str = fd
return None
def __magic_name__( self :Tuple ) -> Optional[int]:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
__SCREAMING_SNAKE_CASE : int = self._lock_file_fd
__SCREAMING_SNAKE_CASE : List[str] = None
fcntl.flock(lowerCAmelCase__ , fcntl.LOCK_UN )
os.close(lowerCAmelCase__ )
return None
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , lowerCAmelCase__ )
except OSError:
pass
else:
__SCREAMING_SNAKE_CASE : int = fd
return None
def __magic_name__( self :List[Any] ) -> Optional[int]:
os.close(self._lock_file_fd )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
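# A minimal, hedged usage sketch of the lock API defined above; it follows the
# upstream py-filelock (v3.0.12) calling pattern that this file mirrors, and
# the file names below are illustrative only.
if __name__ == "__main__":
    lock = FileLock("hello.txt.lock", timeout=1)  # give up after ~1 second
    try:
        with lock:  # acquire() on __enter__, release() on __exit__
            with open("hello.txt", "a") as f:
                f.write("guarded write\n")
    except Timeout:
        # Another process holds the lock and the acquire() loop above expired;
        # Timeout is the exception class declared in __all__ above.
        print("could not acquire hello.txt.lock within 1 second")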
| 9 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = parent
def __magic_name__( self :List[Any] ) -> Tuple:
return {}
def get_html_strings ( ):
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_1, html_string_2]
@require_bsa
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
def __magic_name__( self :int ) -> Optional[Any]:
self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )
@property
def __magic_name__( self :Any ) -> Optional[Any]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def __magic_name__( self :Optional[int] ) -> Any:
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class()
# Test not batched input
html_string = get_html_strings()[0]
encoding = feature_extractor(html_string )
# fmt: off
expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , expected_nodes )
self.assertEqual(encoding.xpaths , expected_xpaths )
# Test batched
html_strings = get_html_strings()
encoding = feature_extractor(html_strings )
# fmt: off
expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , expected_nodes )
self.assertEqual(encoding.xpaths , expected_xpaths )
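# A hedged usage sketch of the feature extractor under test; it assumes bs4 is
# installed and uses a throwaway HTML snippet rather than the fixtures above.
if __name__ == "__main__":
    fe = MarkupLMFeatureExtractor()
    enc = fe("<html><body><h1>Hi</h1></body></html>")
    print(enc.nodes)   # [['Hi']]
    print(enc.xpaths)  # [['/html/body/h1']]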
| 9 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : List[str] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__lowerCAmelCase : Union[str, Any] ={
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__lowerCAmelCase : List[str] ={
'allenai/longformer-base-4096': 4_0_9_6,
'allenai/longformer-large-4096': 4_0_9_6,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs ( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any]="replace" , lowerCAmelCase__ :Union[str, Any]="<s>" , lowerCAmelCase__ :str="</s>" , lowerCAmelCase__ :str="</s>" , lowerCAmelCase__ :Any="<s>" , lowerCAmelCase__ :Optional[Any]="<unk>" , lowerCAmelCase__ :Tuple="<pad>" , lowerCAmelCase__ :Optional[Any]="<mask>" , lowerCAmelCase__ :Any=False , **lowerCAmelCase__ :str , ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__SCREAMING_SNAKE_CASE : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as vocab_handle:
__SCREAMING_SNAKE_CASE : Any = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : List[Any] = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : str = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as merges_handle:
__SCREAMING_SNAKE_CASE : List[Any] = merges_handle.read().split('''\n''' )[1:-1]
__SCREAMING_SNAKE_CASE : Any = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Any = {}
__SCREAMING_SNAKE_CASE : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __magic_name__( self :Tuple ) -> List[Any]:
return len(self.encoder )
def __magic_name__( self :Dict ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Any ) -> Dict:
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : Optional[int] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : str = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = bigram
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : List[Any] = 0
while i < len(lowerCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE : Optional[int] = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : List[Any] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = get_pairs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = ''' '''.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = word
return word
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : str = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(''' ''' ) )
return bpe_tokens
def __magic_name__( self :Dict , lowerCAmelCase__ :Dict ) -> int:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__( self :int , lowerCAmelCase__ :List[str] ) -> int:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__( self :str , lowerCAmelCase__ :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = ''''''.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__SCREAMING_SNAKE_CASE : Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE : Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + '''\n''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
__SCREAMING_SNAKE_CASE : Tuple = token_index
writer.write(''' '''.join(lowerCAmelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __magic_name__( self :Any , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__( self :str , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__( self :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str]=False , **lowerCAmelCase__ :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : int = ''' ''' + text
return (text, kwargs)
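# A hand-checked sketch of the two byte-level BPE helpers defined above;
# "hello" is an arbitrary sample word, not taken from any vocabulary file.
if __name__ == "__main__":
    byte_encoder = bytes_to_unicode()  # 256-entry byte -> printable-unicode map
    word = "".join(byte_encoder[b] for b in "hello".encode("utf-8"))
    print(word)                    # printable ASCII maps to itself: "hello"
    print(get_pairs(tuple(word)))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}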
| 9 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = TransfoXLTokenizer
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def __magic_name__( self :str ) -> Dict:
super().setUp()
vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__( self :Any , **lowerCAmelCase__ :int ) -> str:
lowerCAmelCase__['''lower_case'''] = True  # mirrors upstream: kwargs["lower_case"] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> Tuple:
input_text = '''<unk> UNwanted , running'''
output_text = '''<unk> unwanted, running'''
return input_text, output_text
def __magic_name__( self :Any ) -> Union[str, Any]:
tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
tokens = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(tokens , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
def __magic_name__( self :Tuple ) -> Union[str, Any]:
tokenizer = TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __magic_name__( self :Tuple ) -> List[Any]:
tokenizer = TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __magic_name__( self :Dict ) -> List[Any]:
tokenizer = TransfoXLTokenizer(lower_case=False )
text_in = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
def __magic_name__( self :str ) -> int:
tokenizer = self.get_tokenizer()
original_len = len(tokenizer )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 9 | 1 |
from math import factorial
def solution ( num = 100 ):
    return sum(int(x ) for x in str(factorial(num ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
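    # Hand-checked example: 10! = 3628800, whose digit sum is
    # 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) should print 27.
    print(solution(10))  # expected: 27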
| 9 |
def jaccard_similarity ( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
__lowerCAmelCase : List[Any] ={'a', 'b', 'c', 'd', 'e'}
__lowerCAmelCase : Optional[Any] ={'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
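    # Hand-checked expectation: the sets share {'c', 'd', 'e'} (3 elements)
    # and their union has 8 elements, so the line above prints 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 0.375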
| 9 | 1 |
from __future__ import annotations
from typing import Any
def _UpperCamelCase ( postfix_notation ):
    if not postfix_notation:
        return 0
    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
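    # Worked example: "(3 + 4) * 5" in postfix is "3 4 + 5 *", so the
    # evaluator defined above should return 35 for these tokens.
    print(_UpperCamelCase(["3", "4", "+", "5", "*"]))  # 35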
| 9 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score as fa_score
import datasets
__lowerCAmelCase : Optional[int] ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__lowerCAmelCase : Optional[Any] ='\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__lowerCAmelCase : Dict ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy ( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa ( preds , labels ):
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa ( en_sentvecs , in_sentvecs ):
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , '''cosine''' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Tuple:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> str:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowerCAmelCase__ , lowerCAmelCase__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory ( *objects ):
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size ( exception ):
    _statements = [
        '''CUDA out of memory.''', # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size ( function = None , starting_batch_size = 128 ):
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
def decorator(*args , **kwargs ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
if len(params ) < (len(args ) + 1):
    arg_str = ''', '''.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(batch_size , *args , **kwargs )
except Exception as e:
if should_reduce_batch_size(lowercase__ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
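# A hedged usage sketch of the decorator defined above (it mirrors
# accelerate.utils.find_executable_batch_size); the training body is
# illustrative only. The wrapped function must take batch_size as its first
# argument; an OOM raised inside makes the decorator halve it and retry.
if __name__ == "__main__":
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        print(f"trying batch_size={batch_size}")
        # build dataloaders sized by batch_size and run a training step here

    train()  # called with no args: the decorator injects the current batch_size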
| 9 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
def cosine_distance ( image_embeds , text_embeds ):
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = CLIPConfig
SCREAMING_SNAKE_CASE__ : List[str] = ['''CLIPEncoderLayer''']
def __init__( self :str , lowerCAmelCase__ :CLIPConfig ) -> Tuple:
super().__init__(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = CLIPVisionModel(config.vision_config )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase__ )
@torch.no_grad()
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : int = self.vision_model(lowerCAmelCase__ )[1] # pooled_output
__SCREAMING_SNAKE_CASE : Optional[Any] = self.visual_projection(lowerCAmelCase__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : Optional[Any] = cosine_distance(lowerCAmelCase__ , self.special_care_embeds ).cpu().float().numpy()
__SCREAMING_SNAKE_CASE : Optional[Any] = cosine_distance(lowerCAmelCase__ , self.concept_embeds ).cpu().float().numpy()
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : List[Any] = image_embeds.shape[0]
for i in range(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
__SCREAMING_SNAKE_CASE : List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
__SCREAMING_SNAKE_CASE : List[str] = special_cos_dist[i][concept_idx]
__SCREAMING_SNAKE_CASE : Any = self.special_care_embeds_weights[concept_idx].item()
__SCREAMING_SNAKE_CASE : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
__SCREAMING_SNAKE_CASE : int = cos_dist[i][concept_idx]
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.concept_embeds_weights[concept_idx].item()
__SCREAMING_SNAKE_CASE : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase__ )
result.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :torch.FloatTensor ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vision_model(lowerCAmelCase__ )[1] # pooled_output
__SCREAMING_SNAKE_CASE : List[str] = self.visual_projection(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = cosine_distance(lowerCAmelCase__ , self.special_care_embeds )
__SCREAMING_SNAKE_CASE : Optional[int] = cosine_distance(lowerCAmelCase__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__SCREAMING_SNAKE_CASE : List[Any] = 0.0
__SCREAMING_SNAKE_CASE : Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
__SCREAMING_SNAKE_CASE : List[str] = torch.any(special_scores > 0 , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = special_care * 0.01
__SCREAMING_SNAKE_CASE : int = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
__SCREAMING_SNAKE_CASE : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
__SCREAMING_SNAKE_CASE : Any = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
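# A hedged sanity check of the cosine_distance helper above: after
# L2-normalisation, matching an embedding against itself yields similarity 1.0.
if __name__ == "__main__":
    emb = torch.randn(1, 8)
    print(cosine_distance(emb, emb))  # tensor([[1.0000]]) up to float error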
| 9 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__lowerCAmelCase : Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
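# A hedged illustration of what the lazy registration above buys: importing the
# package stays cheap, and the heavy classes are resolved only when an
# attribute is first touched, e.g.
#
#   import transformers
#   cfg_cls = transformers.VisionEncoderDecoderConfig  # real import happens here
#   print(cfg_cls.model_type)  # "vision-encoder-decoder"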
| 9 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__lowerCAmelCase : List[Any] =datasets.load_iris()
__lowerCAmelCase : Tuple =np.array(data['data'])
__lowerCAmelCase : Dict =np.array(data['target'])
__lowerCAmelCase : List[str] =data['target_names']
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : str =train_test_split(X, y)
def euclidean_distance ( a , b ):
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier ( train_data , train_target , classes , point , k=5 ):
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
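    # Hand-checked sketch of the voting step in classifier(): with neighbour
    # labels [0, 0, 1], Counter(...).most_common(1) == [(0, 2)], so class 0 wins.
    print(Counter([0, 0, 1]).most_common(1))  # [(0, 2)]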
| 9 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def __magic_name__( *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :str ) -> Union[str, Any]:
pass
def _UpperCamelCase ( lowercase__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase : str =(
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __magic_name__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
'''document-question-answering''' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
__SCREAMING_SNAKE_CASE : str = '''What is the placebo?'''
__SCREAMING_SNAKE_CASE : str = [
{
'''image''': load_image(lowerCAmelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : int = '''How many cats are there?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__SCREAMING_SNAKE_CASE : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
# We can optionnally pass directly the words and bounding boxes
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Any = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : int = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Tuple = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : str = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : str = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Dict = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : List[str] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[int] = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
pass
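# A hedged usage sketch of the pipeline exercised above; the checkpoint and
# question are illustrative, and pytesseract must be installed for the OCR path.
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))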
| 9 | 1 |