| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

# Will raise an error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def main():
    # Parse command-line arguments into the dataclasses defined above.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
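For reference, a sketch of a typical command line for this script (the flags come from the two dataclasses above plus `TrainingArguments`; the model name and output directory are illustrative):

python run_xnli.py \
    --model_name_or_path bert-base-multilingual-cased \
    --language de \
    --train_language en \
    --do_train \
    --do_eval \
    --per_device_train_batch_size 32 \
    --learning_rate 5e-5 \
    --num_train_epochs 2.0 \
    --max_seq_length 128 \
    --output_dir /tmp/debug_xnli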
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
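Outside of the test suite, the same flow works interactively; a minimal sketch (assuming `apache-beam` is installed and reusing the `DummyBeamDataset` defined above):

import tempfile

with tempfile.TemporaryDirectory() as cache_dir:
    builder = DummyBeamDataset(cache_dir=cache_dir, beam_runner="DirectRunner")
    builder.download_and_prepare()  # runs the Beam pipeline and writes Arrow files
    dset = builder.as_dataset()     # DatasetDict with a "train" split
    print(dset["train"][0])         # {'content': 'foo'}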
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL redirects to the actual download location."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
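For instance, on a typical failed-test identifier this strips everything after the file path and keeps only the model directory name (the inputs are hypothetical, just to illustrate the `split` logic above):

print(get_model("tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_config"))  # "albert"
print(get_model("tests/utils/test_offline.py::OfflineTest::test_offline_mode"))                # None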
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
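A sketch of a typical invocation (the run id is hypothetical and comes from the GitHub Actions run URL; per the `--token` help text, the token only needs `actions:read` permission):

python get_ci_error_statistics.py --workflow_run_id 1234567890 --output_dir ci_results --token $GITHUB_TOKEN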
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
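A sketch of how this converter is usually invoked (the paths are hypothetical and point at the files shipped with an original TF ALBERT checkpoint):

python convert_albert_original_tf_checkpoint_to_pytorch.py \
    --tf_checkpoint_path ./albert_base/model.ckpt-best \
    --albert_config_file ./albert_base/albert_config.json \
    --pytorch_dump_path ./albert_base/pytorch_model.bin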
def gnome_sort(lst: list) -> list:
    """Sort a list in place using the gnome sort algorithm."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order pair and step back
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
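Gnome sort is O(n^2) in the worst case but O(n) on already-sorted input, since the index only ever walks forward then. A quick usage sketch with the function above:

print(gnome_sort([3, 1, 4, 1, 5]))  # [1, 1, 3, 4, 5]
print(gnome_sort([]))               # []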
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2SeqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
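The -100 used when building `labels` above is the default `ignore_index` of PyTorch's cross-entropy loss, so padded positions contribute nothing to the loss. A minimal standalone sketch of the same masking:

pad_token_id = 0  # illustrative; the real value comes from the tokenizer
labels = [[101, 2023, 102, 0, 0]]
masked = [[-100 if tok == pad_token_id else tok for tok in seq] for seq in labels]
print(masked)  # [[101, 2023, 102, -100, -100]]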
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """Count the Lychrel candidates below `limit`: numbers that do not produce a
    palindrome within 50 reverse-and-add iterations (Project Euler problem 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # the loop finished without ever reaching a palindrome
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
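For example, 349 becomes a palindrome in three iterations (349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337), while 196 is the smallest number thought never to produce one. A quick check with the helpers above:

n = 349
for _ in range(3):
    n = sum_reverse(n)
print(n, is_palindrome(n))  # 7337 True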
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
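A sketch of how this example is usually launched (assuming the file is saved as gradient_accumulation.py; `accelerate launch` supplies the distributed setup, and the flags are the ones defined in `main` above):

accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16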
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __lowercase :
snake_case_ = 42
snake_case_ = None
snake_case_ = None
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Node(1 )
UpperCAmelCase__ : int = Node(2 )
UpperCAmelCase__ : str = Node(3 )
UpperCAmelCase__ : List[str] = Node(4 )
UpperCAmelCase__ : int = Node(5 )
return tree
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : list[Any] = []
if root is None:
return output
UpperCAmelCase__ : int = deque([root] )
while process_queue:
UpperCAmelCase__ : Union[str, Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : list[Any] = []
def populate_output(__UpperCamelCase , __UpperCamelCase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__UpperCamelCase , __UpperCamelCase )
return output
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : list[Any] = []
def populate_output(__UpperCamelCase , __UpperCamelCase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__UpperCamelCase , __UpperCamelCase )
return output
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
if root is None:
return []
UpperCAmelCase__ : list[Sequence[Node | None]] = []
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : Union[str, Any] = height(__UpperCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Tuple = 1
else:
output.append(get_nodes_from_right_to_left(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Union[str, Any] = 0
return output
def lowerCAmelCase ( ): # Main function for testing.
'''simple docstring'''
UpperCAmelCase__ : Tuple = make_tree()
print(F"In-order Traversal: {inorder(__UpperCamelCase )}" )
print(F"Pre-order Traversal: {preorder(__UpperCamelCase )}" )
print(F"Post-order Traversal: {postorder(__UpperCamelCase )}" , """\n""" )
print(F"Height of Tree: {height(__UpperCamelCase )}" , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__UpperCamelCase ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__UpperCamelCase ) + 1 ):
print(F"Level {level}:" , get_nodes_from_left_to_right(__UpperCamelCase , level=__UpperCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
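The recursive traversals above can exhaust the Python call stack on very deep trees; an equivalent iterative in-order walk with an explicit stack (a sketch, reusing the `Node` and `make_tree` helpers above):

def inorder_iterative(root: Node | None) -> list[int]:
    output, stack, node = [], [], root
    while stack or node:
        while node:  # descend to the leftmost unvisited node
            stack.append(node)
            node = node.left
        node = stack.pop()
        output.append(node.data)
        node = node.right  # then visit the right subtree
    return output

print(inorder_iterative(make_tree()))  # [4, 2, 5, 1, 3]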
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

    from PIL import Image


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCamelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase )
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
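# The integration tests above follow a common numerical-regression pattern:
# compare a small slice of a model output against hard-coded reference values
# with an absolute tolerance. A minimal, self-contained sketch of that
# pattern (the tensors here are illustrative, not real model outputs):
import torch

def check_slice(output: torch.Tensor, expected: torch.Tensor, atol: float = 1e-4) -> bool:
    # Only a 3x3 corner is compared, which keeps reference values short
    # while still catching most numerical regressions.
    return torch.allclose(output[:3, :3], expected, atol=atol)

_demo_out = torch.tensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]])
assert check_slice(_demo_out, _demo_out.clone())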
| 23 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
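# The class above is a thin deprecation alias: it subclasses the replacement
# implementation and only emits a warning before delegating. A minimal sketch
# of the same pattern with generic, hypothetical names:
import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)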
| 66 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
snake_case__ : List[str] = logging.get_logger(__name__)
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """vision-encoder-decoder"""
A_ = True
def __init__( self , **_UpperCAmelCase ) -> Dict:
super().__init__(**_UpperCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"""A configuraton of type {self.model_type} cannot be instantiated because """
f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
UpperCamelCase_ = kwargs.pop('encoder' )
UpperCamelCase_ = encoder_config.pop('model_type' )
UpperCamelCase_ = kwargs.pop('decoder' )
UpperCamelCase_ = decoder_config.pop('model_type' )
UpperCamelCase_ = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = True
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> PretrainedConfig:
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
UpperCamelCase_ = True
UpperCamelCase_ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.encoder.to_dict()
UpperCamelCase_ = self.decoder.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = version.parse("""1.11""" )
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-4
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase_ = OrderedDict()
UpperCamelCase_ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
UpperCamelCase_ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
UpperCamelCase_ = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ) -> Mapping[str, Any]:
import torch
UpperCamelCase_ = OrderedDict()
UpperCamelCase_ = super().generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = dummy_input['input_ids'].shape
UpperCamelCase_ = (batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCamelCase_ = dummy_input.pop('input_ids' )
UpperCamelCase_ = dummy_input.pop('attention_mask' )
UpperCamelCase_ = torch.zeros(_UpperCAmelCase )
return common_inputs
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> None:
pass
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = "default" ) -> OnnxConfig:
UpperCamelCase_ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_UpperCAmelCase , _UpperCAmelCase )
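# The config class above composes two sub-configurations into one
# serializable object. A minimal sketch of that composition pattern using
# plain dicts (all names here are illustrative):
import copy

class ComposedConfig:
    model_type = "encoder-decoder"

    def __init__(self, encoder: dict, decoder: dict) -> None:
        self.encoder = encoder
        self.decoder = decoder

    def to_dict(self) -> dict:
        # Serialize recursively and record the composite type, mirroring the
        # to_dict() logic of the config above.
        output = copy.deepcopy(self.__dict__)
        output["model_type"] = self.model_type
        return output

_cfg = ComposedConfig({"model_type": "vit"}, {"model_type": "gpt2"})
assert _cfg.to_dict()["encoder"]["model_type"] == "vit"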
| 23 | 0 |
import re
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
    if len(re.findall('[ATCG]' , snake_case__ ) ) != len(snake_case__ ):
        raise ValueError('Invalid Strand' )
    return snake_case__.translate(str.maketrans('ATCG' , 'TAGC' ) )
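# A quick illustration of the maketrans/translate idiom used above: the
# translation table maps each base to its complement in a single pass.
_COMPLEMENT = str.maketrans('ATCG' , 'TAGC')
assert 'ATCG'.translate(_COMPLEMENT) == 'TAGC'
assert 'GGTA'.translate(_COMPLEMENT) == 'CCAT'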
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 67 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case (__lowercase , __lowercase , __lowercase):
# Initialise PyTorch model
UpperCamelCase_ = MobileBertConfig.from_json_file(__lowercase)
print(f"""Building PyTorch model from configuration: {config}""")
UpperCamelCase_ = MobileBertForPreTraining(__lowercase)
# Load weights from tf checkpoint
UpperCamelCase_ = load_tf_weights_in_mobilebert(__lowercase , __lowercase , __lowercase)
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""")
torch.save(model.state_dict() , __lowercase)
if __name__ == "__main__":
snake_case__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
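# The conversion can also be invoked directly from Python; the paths below
# are placeholders for an actual TF checkpoint, config file, and output file:
# convert_tf_checkpoint_to_pytorch(
#     '/path/to/mobilebert/model.ckpt',
#     '/path/to/mobilebert_config.json',
#     '/path/to/pytorch_model.bin',
# )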
| 23 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase =k.replace(A_ , A_ )
if k.startswith("""encoder""" ):
__UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" )
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" )
return k
def lowercase__ ( A_: Tuple ) -> str:
"""simple docstring"""
__UpperCAmelCase =[
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__UpperCAmelCase =sd.pop(A_ )
__UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__UpperCAmelCase =v
__A = ["START"]
@torch.no_grad()
def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase =torch.load(A_ , map_location="""cpu""" )
__UpperCAmelCase =model["""model"""]
__UpperCAmelCase =BlenderbotConfig.from_json_file(A_ )
__UpperCAmelCase =BlenderbotForConditionalGeneration(A_ )
__UpperCAmelCase =m.model.state_dict().keys()
__UpperCAmelCase =[]
__UpperCAmelCase ={}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase =rename_state_dict_key(A_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase =v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A_ )
m.model.load_state_dict(A_ , strict=A_ )
m.half()
m.save_pretrained(A_ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__A = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
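# The key renaming above is an ordered substring-substitution pass over
# checkpoint keys. A self-contained sketch of the same technique, using a
# small illustrative pattern list:
PATTERNS_DEMO = [("q_lin", "q_proj"), ("k_lin", "k_proj"), ("out_lin", "out_proj")]

def rename_key_demo(k: str) -> str:
    # Apply every (old, new) substitution in order; non-matching patterns
    # leave the key unchanged.
    for old, new in PATTERNS_DEMO:
        k = k.replace(old, new)
    return k

assert rename_key_demo("encoder.layers.0.attention.q_lin.weight") == (
    "encoder.layers.0.attention.q_proj.weight"
)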
| 68 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = MODEL_FOR_MASKED_LM_MAPPING
A_ = TF_MODEL_FOR_MASKED_LM_MAPPING
def _UpperCAmelCase ( self ) -> List[str]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
] , )
UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
UpperCamelCase_ = pipe('Paris is the [MASK] of France.' )
        # We actually don't care about the result; we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(_UpperCAmelCase )
@slow
@require_tf
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
@require_tf
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = fill_masker.tokenizer
UpperCamelCase_ = fill_masker.model
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
with self.assertRaises(_UpperCAmelCase ):
fill_masker([None] )
        # Inputs without a mask token are not supported
with self.assertRaises(_UpperCAmelCase ):
fill_masker('This is' )
self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = tokenizer.get_vocab()
UpperCamelCase_ = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Call argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Score equivalence
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs]
UpperCamelCase_ = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary, so the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = tokenizer.get_vocab()
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# top_k=2, ntargets=3
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase )
        # If we use the most probable targets and filter differently, we should
        # still get the same results
UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return
        # more results than there are unique targets
self.assertEqual(len(_UpperCAmelCase ) , 3 )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
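# A minimal usage sketch of the pipeline exercised above (downloads the tiny
# test checkpoint on first run; the tiny random model's scores are not
# meaningful, they only exercise the code path):
from transformers import pipeline

unmasker_demo = pipeline('fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 )
print(unmasker_demo('My name is <mask>' ) )
print(unmasker_demo('My name is <mask>' , targets=[' Patrick', ' Clara'] ) )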
| 23 | 0 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a : Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : str , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : int = 8 , **a_ : Any , ):
"""simple docstring"""
super().__init__(**a_ )
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_pad
__snake_case = pad_size
def A ( self : Optional[Any] , a_ : np.ndarray , a_ : float , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Tuple ):
"""simple docstring"""
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def A ( self : Optional[int] , a_ : np.ndarray , a_ : int , a_ : Optional[Union[str, ChannelDimension]] = None ):
"""simple docstring"""
__snake_case , __snake_case = get_image_size(a_ )
__snake_case = (old_height // size + 1) * size - old_height
__snake_case = (old_width // size + 1) * size - old_width
return pad(a_ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=a_ )
def A ( self : int , a_ : ImageInput , a_ : Optional[bool] = None , a_ : Optional[float] = None , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a_ : str , ):
"""simple docstring"""
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_pad if do_pad is not None else self.do_pad
__snake_case = pad_size if pad_size is not None else self.pad_size
__snake_case = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(a_ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_pad:
__snake_case = [self.pad(a_ , size=a_ ) for image in images]
__snake_case = [to_channel_dimension_format(a_ , a_ ) for image in images]
__snake_case = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
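# The pad sizes above round each dimension up to the next multiple of `size`
# (strictly above: an already-divisible dimension still gets a full extra
# block, which is what the formula (old // size + 1) * size - old produces).
# A quick worked check with size=8:
import numpy as np

_size, _old_h, _old_w = 8, 13, 16
_pad_h = (_old_h // _size + 1) * _size - _old_h
_pad_w = (_old_w // _size + 1) * _size - _old_w
assert (_pad_h, _pad_w) == (3, 8)  # 16 is already a multiple, so a full extra 8 is added
_padded = np.pad(np.zeros((_old_h, _old_w)), ((0, _pad_h), (0, _pad_w)), mode="symmetric")
assert _padded.shape == (16, 24)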
| 69 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionSAGPipeline
A_ = TEXT_TO_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_BATCH_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
A_ = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase )
UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> List[Any]:
if str(_UpperCAmelCase ).startswith('mps' ):
UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCamelCase_ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , width=768 , height=512 , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
UpperCamelCase_ = output.images
assert image.shape == (1, 512, 768, 3)
| 23 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase : Any = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCamelCase_ = get_sagemaker_input()
else:
lowerCamelCase_ = get_cluster_input()
return config
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any]=None ):
'''simple docstring'''
if subparsers is not None:
lowerCamelCase_ = subparsers.add_parser('config' , description=lowercase )
else:
lowerCamelCase_ = argparse.ArgumentParser('Accelerate config command' , description=lowercase )
parser.add_argument(
'--config_file' , default=lowercase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase )
return parser
def _SCREAMING_SNAKE_CASE ( lowercase : Dict ):
'''simple docstring'''
lowerCamelCase_ = get_user_input()
if args.config_file is not None:
lowerCamelCase_ = args.config_file
else:
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
lowerCamelCase_ = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase )
else:
config.to_yaml_file(lowercase )
print(f"""accelerate configuration saved at {config_file}""" )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = config_command_parser()
lowerCamelCase_ = parser.parse_args()
config_command(lowercase )
if __name__ == "__main__":
main()
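# The parser above follows the common argparse subcommand pattern: each
# subcommand registers a handler via set_defaults(func=...), and the entry
# point dispatches on args.func. A generic, self-contained sketch:
import argparse

def handle_config(args: argparse.Namespace) -> None:
    print(f"would write config to {args.config_file}")

_demo_parser = argparse.ArgumentParser(prog="demo")
_demo_sub = _demo_parser.add_subparsers()
_demo_cfg = _demo_sub.add_parser("config")
_demo_cfg.add_argument("--config_file", default="default_config.yaml")
_demo_cfg.set_defaults(func=handle_config)
_demo_args = _demo_parser.parse_args(["config", "--config_file", "my.yaml"])
_demo_args.func(_demo_args)  # prints: would write config to my.yaml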
| 70 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
snake_case__ : List[str] = TypeVar("""T""")
def _snake_case (__lowercase):
return (position - 1) // 2
def _snake_case (__lowercase):
return (2 * position) + 1
def _snake_case (__lowercase):
return (2 * position) + 2
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
UpperCamelCase_ = []
UpperCamelCase_ = {}
UpperCamelCase_ = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def _UpperCAmelCase ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
UpperCamelCase_ = self.elements
self.elements += 1
self._bubble_up(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
UpperCamelCase_ , UpperCamelCase_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
UpperCamelCase_ , UpperCamelCase_ = self.heap[0]
self._bubble_down(_UpperCAmelCase )
return elem
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None:
# Update the weight of the given key
UpperCamelCase_ = self.position_map[elem]
UpperCamelCase_ = (elem, weight)
if position > 0:
UpperCamelCase_ = get_parent_position(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_UpperCAmelCase )
else:
self._bubble_down(_UpperCAmelCase )
else:
self._bubble_down(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
UpperCamelCase_ = self.position_map[elem]
if curr_pos == 0:
return None
UpperCamelCase_ = get_parent_position(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = self.heap[curr_pos]
UpperCamelCase_ , UpperCamelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_up(_UpperCAmelCase )
return None
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
UpperCamelCase_ = self.position_map[elem]
UpperCamelCase_ , UpperCamelCase_ = self.heap[curr_pos]
UpperCamelCase_ = get_child_left_position(_UpperCAmelCase )
UpperCamelCase_ = get_child_right_position(_UpperCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
UpperCamelCase_ , UpperCamelCase_ = self.heap[child_left_position]
UpperCamelCase_ , UpperCamelCase_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
if child_left_position < self.elements:
UpperCamelCase_ , UpperCamelCase_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
else:
return None
if child_right_position < self.elements:
UpperCamelCase_ , UpperCamelCase_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
return None
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None:
        # Swap the nodes at the two given positions and keep the position map in sync
        UpperCamelCase_ = self.heap[nodea_pos][0]
        UpperCamelCase_ = self.heap[nodeb_pos][0]
        UpperCamelCase_ , UpperCamelCase_ = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        UpperCamelCase_ = nodeb_pos
        UpperCamelCase_ = nodea_pos
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
UpperCamelCase_ = {}
UpperCamelCase_ = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
UpperCamelCase_ = {}
self.nodes += 1
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(_UpperCAmelCase )
self.add_node(_UpperCAmelCase )
UpperCamelCase_ = weight
UpperCamelCase_ = weight
def _snake_case (__lowercase , ):
UpperCamelCase_ = {node: maxsize for node in graph.connections}
UpperCamelCase_ = {node: None for node in graph.connections}
UpperCamelCase_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__lowercase , __lowercase)
if priority_queue.is_empty():
return dist, parent
# initialization
UpperCamelCase_ = priority_queue.extract_min()
UpperCamelCase_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowercase , dist[neighbour])
UpperCamelCase_ = node
# running prim's algorithm
while not priority_queue.is_empty():
UpperCamelCase_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowercase , dist[neighbour])
UpperCamelCase_ = node
return dist, parent
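# The class-based version above maintains its own indexed heap so that
# update_key is possible. A shorter, self-contained variant of Prim's
# algorithm using the standard-library heapq with lazy deletion instead:
import heapq

def prim_mst_weight(graph: dict[str, dict[str, int]], start: str) -> int:
    visited, total = set(), 0
    heap = [(0, start)]
    while heap:
        weight, node = heapq.heappop(heap)
        if node in visited:
            continue  # stale entry: node was already reached via a cheaper edge
        visited.add(node)
        total += weight
        for neighbour, w in graph[node].items():
            if neighbour not in visited:
                heapq.heappush(heap, (w, neighbour))
    return total

_demo_graph = {"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}
assert prim_mst_weight(_demo_graph, "a") == 3  # edges a-b (1) and b-c (2)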
| 23 | 0 |
'''simple docstring'''
from math import sqrt
def a__ ( _SCREAMING_SNAKE_CASE : int = 1_00_00_00 ) -> int:
"""simple docstring"""
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_SCREAMING_SNAKE_CASE , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 71 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case__ : Dict = TypeVar("""T""")
class _a ( Generic[T] ):
"""simple docstring"""
A_ = 42 # Cache store of keys
A_ = 42 # References of the keys in cache
A_ = 10 # Maximum capacity of cache
def __init__( self , _UpperCAmelCase ) -> None:
UpperCamelCase_ = deque()
UpperCamelCase_ = set()
if not n:
UpperCamelCase_ = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCamelCase_ = n
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCamelCase_ = self.dq_store.pop()
self.key_reference.remove(_UpperCAmelCase )
else:
self.dq_store.remove(_UpperCAmelCase )
self.dq_store.appendleft(_UpperCAmelCase )
self.key_reference.add(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> None:
for k in self.dq_store:
print(_UpperCAmelCase )
def __repr__( self ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
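# Design note: the deque-based refer() above costs O(n) on a cache hit
# because deque.remove() is a linear scan. An OrderedDict gives the same
# recently-used ordering with O(1) moves; a minimal alternative sketch:
from collections import OrderedDict

class LRUCacheOD:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store = OrderedDict()  # keys ordered from least to most recent

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # O(1) instead of deque.remove()
        else:
            if len(self.store) == self.capacity:
                self.store.popitem(last=False)  # evict least recently used
            self.store[key] = None

cache_demo = LRUCacheOD(2)
cache_demo.refer("A"); cache_demo.refer("B"); cache_demo.refer("A"); cache_demo.refer("C")
assert list(cache_demo.store) == ["A", "C"]  # "B" was evicted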
| 23 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase ( lowercase_ : str , lowercase_ : str , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : int , lowercase_ : Optional[int] = None , ) -> int:
'''simple docstring'''
lowercase ={}
if train_file is not None:
lowercase =[train_file]
if eval_file is not None:
lowercase =[eval_file]
if test_file is not None:
lowercase =[test_file]
lowercase =datasets.load_dataset('''csv''' , data_files=lowercase_ )
lowercase =list(ds[list(files.keys() )[0]].features.keys() )
lowercase =features_name.pop(lowercase_ )
lowercase =list(set(ds[list(files.keys() )[0]][label_name] ) )
lowercase ={label: i for i, label in enumerate(lowercase_ )}
lowercase =tokenizer.model_input_names
lowercase ={}
if len(lowercase_ ) == 1:
for k in files.keys():
lowercase =ds[k].map(
lambda lowercase_ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase_ , max_length=lowercase_ , padding='''max_length''' ) , batched=lowercase_ , )
elif len(lowercase_ ) == 2:
for k in files.keys():
lowercase =ds[k].map(
lambda lowercase_ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase_ , max_length=lowercase_ , padding='''max_length''' , ) , batched=lowercase_ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
lowercase ={k: v for k, v in ex.items() if k in input_names}
lowercase =labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
lowercase ={k: v for k, v in ex.items() if k in input_names}
lowercase =labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
lowercase ={k: v for k, v in ex.items() if k in input_names}
lowercase =labelaid[ex[label_name]]
yield (d, label)
lowercase =(
tf.data.Dataset.from_generator(
lowercase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
lowercase =train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
lowercase =(
tf.data.Dataset.from_generator(
lowercase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
lowercase =val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
lowercase =(
tf.data.Dataset.from_generator(
lowercase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
lowercase =test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_UpperCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(metadata={'help': 'Which column contains the label'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the training file'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the development file'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the test file'} )
UpperCamelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bit training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
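# Example invocation (illustrative file names and label column; the script
# filename is hypothetical):
#
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv \
#     --dev_file dev.csv \
#     --label_column_id 0 \
#     --output_dir ./outputs \
#     --do_train --do_eval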
import numpy as np
def sigmoid(vector):
    # Standard logistic function: 1 / (1 + e^(-x)).
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector):
    # SiLU / swish activation: x * sigmoid(x).
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
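    # Quick demo of the activations above (values are illustrative):
    print(sigmoid_linear_unit(np.array([0.0, 1.0])))  # -> [0.         0.73105858]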
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
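# The `_LazyModule` assigned above defers the heavy torch imports until an
# attribute is first accessed. A minimal standalone sketch of the same idea
# (illustrative only -- not the real transformers implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f".{submodule}", self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(f"module {self.__name__} has no attribute {attr}")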
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
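    # Worked example (illustrative): for 2023 the intermediate values come out
    # to days_to_add = 15 and days_from_phm_to_sunday = 3, so Easter lands on
    # March 22 + 18 days = April 9, 2023:
    #
    #   >>> gauss_easter(2023)
    #   datetime.datetime(2023, 4, 9, 0, 0)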
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue  # skip ids that do not decode to valid text
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check: make sure the default model_max_length is not the sentinel used below
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
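# Byte-level intuition for the expected ids above (illustrative): the Perceiver
# tokenizer maps raw UTF-8 bytes to ids with a fixed offset for its special
# tokens, so "e" (byte 0x65 = 101) becomes 107 and a space (0x20 = 32) becomes
# 38, while 4 and 5 mark [CLS] and [SEP] -- consistent with test_multibytes_char.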
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    # number & -number isolates the lowest set bit; log2 gives its index.
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
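    # Illustrative check: 36 = 0b100100, so the rightmost set bit is at index 2.
    print(get_index_of_rightmost_set_bit(36))  # -> 2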
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        # Flags `open(...)` calls that neither pass an `encoding` nor use a binary/write mode.
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        # Flags `print(` calls, ignoring those inside comments, strings, and docstrings.
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
"""simple docstring"""
import math
def __UpperCAmelCase ( __UpperCamelCase = 1_00 ):
__lowercase : List[Any] = sum(i * i for i in range(1 , n + 1 ) )
__lowercase : Any = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)

    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )

    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0

    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()

    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)

    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]

            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
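# Pipeline summary (sketch of the flow implemented above): generate_n_pairs
# collects (context, information-gain) pairs for the secondary learner,
# training_secondary_learner fits that filter, and finetune then trains GPT-2
# only on contexts whose predicted IG(X) clears the threshold, which decays
# to -1 after 10 batches so that eventually every context passes.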
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : Any = ""
for word_or_phrase in separated:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise Exception("join() accepts only strings to be joined" )
joined += word_or_phrase + separator
return joined.strip(UpperCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
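    # Illustrative usage: join("-", ["a", "b", "c"]) -> "a-b-c". Note that
    # `.strip(separator)` strips characters, so this is only reliable for
    # single-character separators.
    print(join("-", ["a", "b", "c"]))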
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
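# Minimal usage sketch mirroring translate_src_text above (illustrative; it
# downloads the real facebook/mbart-large-en-ro checkpoint):
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
#   inputs = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
#   ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
#   tokenizer.batch_decode(ids, skip_special_tokens=True)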
def stooge_sort(arr: list) -> list:
    """Sort ``arr`` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
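    # Example (illustrative): stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].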
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Sum the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
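    # Quick worked example (illustrative): 10! = 3628800, whose digits sum to 27.
    assert solution(10) == 27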
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
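# Minimal usage sketch for the processor above (illustrative; `frames` is a
# hypothetical list of PIL.Image frames for one video):
#
#   processor = VideoImageProcessor()
#   batch = processor.preprocess(frames, return_tensors="np")
#   # batch["pixel_values"] has shape (num_videos, num_frames, channels, height, width)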
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase : Union[str, Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
__UpperCamelCase : List[str] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
__UpperCamelCase : Tuple = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
return {
"matthews_correlation": float(matthews_corrcoef(_lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase ) ),
}
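# Minimal usage sketch (mirrors the first docstring example above):
#
#   import datasets
#   matthews_metric = datasets.load_metric("matthews_correlation")
#   results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
#                                     predictions=[1, 2, 2, 0, 3, 3])
#   round(results["matthews_correlation"], 2)  # -> 0.54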
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class holding the previous sample and its mean."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
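# Minimal sampling-loop sketch (illustrative shapes; `score_fn` is a
# hypothetical score network mapping (sample, t) -> model_output):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=10)
#   scheduler.set_sigmas(num_inference_steps=10)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = score_fn(sample, t)
#       sample = scheduler.step_correct(model_output, sample).prev_sample
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample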
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    >>> add(3, 5)
    8
    """
    while second != 0:
        # bits set in both numbers produce a carry
        carry = first & second
        # XOR adds the bits that do not carry
        first ^= second
        # the carry is applied one position to the left on the next pass
        second = carry << 1
    return first
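
# --- Added note (not part of the original file) ---
# With Python's arbitrary-precision integers the loop above never terminates
# when the mathematical result is negative (e.g. add(-1, 1)), because there is
# no fixed word size for the carry to overflow out of. A sketch of a
# 32-bit-masked variant, assuming two's-complement semantics:
def add_32bit(first: int, second: int) -> int:
    mask = 0xFFFFFFFF
    while second & mask:
        carry = (first & second) << 1
        first = (first ^ second) & mask
        second = carry & mask
    # reinterpret the masked result as a signed 32-bit value
    return first if first <= 0x7FFFFFFF else ~(first ^ mask)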
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
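
# --- Added note (not part of the original file) ---
# `_LazyModule` defers the heavy torch import until an attribute is first
# accessed. A minimal standalone sketch of the same idea via PEP 562 module
# `__getattr__` (illustrative only, not the transformers implementation):
#
#     import importlib
#
#     def __getattr__(name):
#         for module_name, exports in _import_structure.items():
#             if name in exports:
#                 module = importlib.import_module("." + module_name, __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")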
| 23 | 0 |
"""simple docstring"""
import os
import sys
import unittest
lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
lowerCamelCase = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = get_test_to_tester_mapping(_UpperCAmelCase )
UpperCAmelCase_ = get_test_to_tester_mapping(_UpperCAmelCase )
UpperCAmelCase_ = {"BertModelTest": "BertModelTester"}
UpperCAmelCase_ = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = get_model_to_test_mapping(_UpperCAmelCase )
UpperCAmelCase_ = get_model_to_test_mapping(_UpperCAmelCase )
UpperCAmelCase_ = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
UpperCAmelCase_ = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = get_model_to_tester_mapping(_UpperCAmelCase )
UpperCAmelCase_ = get_model_to_tester_mapping(_UpperCAmelCase )
UpperCAmelCase_ = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
UpperCAmelCase_ = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
| 82 |
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| 23 | 0 |
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Converts a plain-text message to Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Converts a Morse-code message back to plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
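
# --- Added example (not part of the original file) ---
# Quick round-trip check; the encoded form follows directly from the table above.
def _example_round_trip() -> bool:
    return decrypt(encrypt("SOS")) == "SOS"  # encrypt("SOS") == "... --- ..."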
| 83 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
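
# --- Added note (not part of the original file) ---
# Outside of tests, the same builders run through the standard datasets API;
# a sketch, assuming apache_beam is installed:
#
#     builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#     builder.download_and_prepare()
#     dset = builder.as_dataset()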
| 23 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for LRU Cache."""

    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None

    def __repr__(self):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list built specifically for LRU Cache."""

    def __init__(self):
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node):
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node

    def remove(self, node):
        """Removes and returns the given node; returns None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache that stores a given capacity of key/value pairs."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key):
        return key in self.cache

    def get(self, key):
        """Returns the value for the key and marks it as most recently used."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value):
        """Stores the key/value pair, evicting the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator version of LRU Cache for one-argument functions."""

        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
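
# --- Added example (not part of the original file) ---
# Using the class-method decorator above to memoize a one-argument function:
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

# fib(10) -> 55; afterwards fib.cache_info() reports hits/misses for the
# shared LRUCache instance behind the decorated function.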
| 84 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
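
# --- Added example (not part of the original file) ---
# Typical invocation, assuming the script is saved under its usual transformers
# name; all paths are placeholders:
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert_base/model.ckpt-best \
#         --albert_config_file ./albert_base/albert_config.json \
#         --pytorch_dump_path ./albert_base/pytorch_model.bin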
| 23 | 0 |
def perfect_cube(n: int) -> bool:
    """Check whether ``n`` is a perfect cube, using a floating-point cube root.

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    """
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
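
# --- Added note (not part of the original file) ---
# The float cube root above can misclassify inputs (especially large ones)
# because of rounding. A sketch of an exact integer variant via binary search:
def perfect_cube_exact(n: int) -> bool:
    if n < 0:
        n = -n  # a negative number is a cube iff its absolute value is
    low, high = 0, n
    while low <= high:
        mid = (low + high) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            low = mid + 1
        else:
            high = mid - 1
    return False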
| 85 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Rename an original checkpoint key to the Hugging Face naming scheme."""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # (reconstructed) target names follow the CLIP-style q/k/v projections of the HF GroupViT layout
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            # (reconstructed) target names follow the CLIP-style q/k/v projections of the HF GroupViT layout
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 86 |
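
# --- Added example (not part of the original file) ---
# Typical invocation (the checkpoint filename is a placeholder):
#
#     python convert_groupvit_nvlab_to_hf.py \
#         --checkpoint_path ./groupvit_gcc_yfcc.pth \
#         --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#         --model_name groupvit-gcc-yfcc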
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
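
# --- Added note (not part of the original file) ---
# What `accelerator.accumulate(model)` does, in spirit: it defers the optimizer
# step and gradient synchronization until `gradient_accumulation_steps`
# backward passes have run. A hand-rolled equivalent (sketch, without Accelerate):
#
#     accumulation_steps = 4
#     optimizer.zero_grad()
#     for step, batch in enumerate(train_dataloader):
#         loss = model(**batch).loss / accumulation_steps
#         loss.backward()
#         if (step + 1) % accumulation_steps == 0:
#             optimizer.step()
#             optimizer.zero_grad()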
| 23 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, make sure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
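
# --- Added example (not part of the original file) ---
# Minimal usage sketch, assuming the LayoutXLM checkpoint on the Hub and a
# placeholder document image path:
def _example_layoutxlm_processing():
    from PIL import Image
    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")  # placeholder path
    encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
    return encoding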
| 87 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        # (reconstructed) field names below are best-effort, following the upstream Mask2Former test
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any:
with torch.no_grad():
UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
UpperCamelCase_ = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> str:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = (self.model_tester.min_size,) * 2
UpperCamelCase_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCamelCase_ = self.model_tester.get_config()
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _UpperCAmelCase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCamelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors='pt',
        )

        inputs['pixel_values'] = inputs['pixel_values'].to(torch_device)
        inputs['mask_labels'] = [el.to(torch_device) for el in inputs['mask_labels']]
        inputs['class_labels'] = [el.to(torch_device) for el in inputs['class_labels']]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 23 | 0 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
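# Typical invocation (file names are illustrative; `dev-v2.0.json` is the
# official SQuAD 2.0 dev set and `predictions.json` maps question ids to
# predicted answer strings):
#   python evaluate.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json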
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
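# For reference, the functions below walk a SQuAD-style JSON of the shape
# (values elided):
#   {"data": [{"paragraphs": [{"qas": [
#       {"id": ..., "answers": {"text": [...], "answer_start": [...]}}]}]}]}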
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
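# Worked example for compute_f1 (illustrative, not part of the official script):
# gold "a cat sat" and prediction "the cat sat down" normalize to the token
# lists ["cat", "sat"] and ["cat", "sat", "down"] (articles are stripped), so
# num_same = 2, precision = 2/3, recall = 2/2 = 1, and
# F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.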
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
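# Sketch of the sweep above (illustrative): questions are visited in order of
# increasing no-answer probability, starting from the score obtained by
# abstaining on everything (num_no_ans). Answering one more question changes
# the running score by +s for an answerable question scored s and by -1 for an
# unanswerable one given a non-empty prediction, so the best threshold is
# wherever that running score peaks.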
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)

    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
        matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 88 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
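# A minimal construction sketch (hedged: the checkpoint names below are
# illustrative; any vision encoder / autoregressive decoder pair works):
#
#   from transformers import AutoConfig
#
#   encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
#   decoder_config = AutoConfig.from_pretrained("gpt2")
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)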
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 23 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['polics', 'health'])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ], )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ], )

        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')

        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')

        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template', )

        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=None, )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
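    # In words (for reference): `entailment_id` is resolved from the model's
    # label2id mapping by looking for a label whose lowercased name starts with
    # "entail"; e.g. {'contradiction': 0, 'neutral': 1, 'entailment': 2}
    # resolves to 2, while {'LABEL_0': 0, ...} has no such label and falls
    # back to -1, as the assertions above exercise.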
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 1_00, candidate_labels=['politics', 'public health', 'science'])
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
self.assertEqual(
            nested_simplify(outputs), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf', )
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
self.assertEqual(
            nested_simplify(outputs), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
self.assertEqual(
            nested_simplify(outputs), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True, )
self.assertEqual(
            nested_simplify(outputs), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
self.assertEqual(
            nested_simplify(outputs), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True, )
self.assertEqual(
            nested_simplify(outputs), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
| 89 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
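    # Example invocation of this script (paths are illustrative):
    #   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./pytorch_model.bin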
| 23 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )
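    # Typical usage of this pipeline (illustrative checkpoint; any image
    # classification model on the Hub works the same way):
    #   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
    #   classifier("path/to/image.jpg", top_k=3)
    # returns a list of {"score": float, "label": str} dicts.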
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 90 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2, framework='tf')
        outputs = unmasker('My name is <mask>')
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
] , )
        outputs = unmasker('The largest city in France is <mask>')
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
        outputs = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2, framework='pt')
        outputs = unmasker('My name is <mask>')
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
        outputs = unmasker('The largest city in France is <mask>')
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
        outputs = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
] , )
        outputs = unmasker('My name is <mask> <mask>', top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=6), [
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline('fill-mask', model='hf-internal-testing/tiny-random-distilbert', device=0, framework='pt')
# convert model to fp16
pipe.model.half()
        outputs = pipe('Paris is the [MASK] of France.')
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
        self.assertIsInstance(outputs, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task='fill-mask', model='distilroberta-base', top_k=2, framework='pt')
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task='fill-mask', model='distilroberta-base', top_k=2, framework='tf')
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker('My name is <mask>')
self.assertEqual(
            nested_simplify(outputs), [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
        outputs = unmasker('The largest city in France is <mask>')
self.assertEqual(
            nested_simplify(outputs), [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
        outputs = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
self.assertEqual(
            nested_simplify(outputs), [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='pt')
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='tf')
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)')

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"""This is another {tokenizer.mask_token} test""",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"""This is a {tokenizer.mask_token}""", )
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ], )

        outputs = fill_masker([f"""This is a {tokenizer.mask_token}"""])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ], )

        outputs = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""])
        self.assertEqual(
            outputs, [
                [
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                ],
                [
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                ],
            ], )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker('This is')

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""")
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ], )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['token'] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['token_str'] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""", targets=targets)
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ], )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['token'] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['token_str'] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""", targets=targets)
        tokens = [top_mask['token_str'] for top_mask in outputs]
        scores = [top_mask['score'] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"""This is a {tokenizer.mask_token}""", targets=tokens)
            target_scores = [top_mask['score'] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"""This is a {tokenizer.mask_token}""", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"""This is a {tokenizer.mask_token}""", targets=[''])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"""This is a {tokenizer.mask_token}""", targets='')
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""")
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ], )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"""This is a {tokenizer.mask_token}""", top_k=2)
        self.assertEqual(
            outputs2, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ], )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el['token_str'] for el in sorted(outputs, key=lambda x: x['score'], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"""This is a {tokenizer.mask_token}""", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"""My name is {tokenizer.mask_token}""", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""", top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                ],
                [
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                ],
                [
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                ],
            ], )
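    # Distilled usage of the pipeline under test (illustrative checkpoint):
    #   unmasker = pipeline('fill-mask', model='distilroberta-base', top_k=2)
    #   unmasker('Paris is the <mask> of France.')
    # returns a list of {'sequence', 'score', 'token', 'token_str'} dicts.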
| 23 | 0 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # The row we are about to fill is the number of queens already placed
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
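# Concrete check of the two diagonal formulas (illustrative): queens at
# (row=0, col=1) and (row=2, col=3) give row - col = -1 for both, so they share
# a 45° diagonal and collide; queens at (row=0, col=3) and (row=2, col=1) give
# row + col = 3 for both, so they share a 135° diagonal and also collide.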
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')
if __name__ == "__main__":
import doctest
doctest.testmod()
    n_queens_solution(4)
| 91 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np')
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np')
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type='np',
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
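
# A minimal usage sketch (commented out, since it downloads full model weights).
# `sag_scale` layers Self-Attention Guidance on top of classifier-free guidance:
#
#     pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
#     pipe = pipe.to(torch_device)
#     image = pipe(
#         'a photo of an astronaut',
#         generator=torch.manual_seed(0),
#         guidance_scale=7.5,
#         sag_scale=0.75,
#         num_inference_steps=20,
#     ).images[0]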
| 23 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""CLIPFeatureExtractor"""]
UpperCamelCase_ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
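
# A small illustration (commented out, since this file is the package __init__
# itself): with the lazy layout above, importing the package stays cheap and
# each heavy submodule is only loaded on first attribute access, e.g.
#
#     from transformers.models.clip import CLIPConfig  # resolved via _LazyModule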
| 92 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """simple docstring"""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions and keep position_map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """simple docstring"""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
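

if __name__ == "__main__":
    # A minimal sketch using the classes above: build a small weighted graph and
    # inspect the distances and spanning-tree parents chosen by prims_algo.
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("c", "a", 15)
    graph.add_edge("b", "d", 100)
    dist, parent = prims_algo(graph)
    print(dist)
    print(parent)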
| 23 | 0 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """simple docstring"""
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(F"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 93 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """simple docstring"""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 23 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    """simple docstring"""

    _backends = ['transformers', 'torch', 'note_seq']

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])
| 94 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
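

# A quick numeric check of the two activations above: sigmoid(0) is exactly 0.5
# and SiLU(x) = x * sigmoid(x), so SiLU(0) is 0.
assert float(sigmoid(np.array(0.0))) == 0.5
assert float(sigmoid_linear_unit(np.array(0.0))) == 0.0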
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 0 |
"""simple docstring"""
import numpy as np
SQUARE = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message.replace(' ', '')
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message)] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
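

if __name__ == "__main__":
    # A minimal round-trip sketch for the class above; note that encoding merges
    # 'j' into 'i', as is standard for a 5x5 Polybius square.
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    assert cipher.decode(encoded) == "testmessage"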
| 95 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
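

# A quick sanity check for gauss_easter: Western Easter in 2023 fell on April 9.
assert gauss_easter(2023) == datetime(2023, 4, 9)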
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
snake_case__ : Dict = """will be""" if year > datetime.now().year else """was"""
print(f'Easter in {year} {tense} {gauss_easter(year)}')
| 23 | 0 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
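

# A quick check of solution above: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26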
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 96 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 23 | 0 |
def solution(n: int = 1_000_000) -> int:
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
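

# A quick check of solution above: among starting values below 10, the longest
# Collatz chain (20 terms) starts at 9.
assert solution(10) == 9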
if __name__ == "__main__":
print(solution(int(input().strip())))
| 97 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _a(TestCase):
    """simple docstring"""

    def _no_encoding_on_file_open(self, filepath):
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath):
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
| 23 | 0 |
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """simple docstring"""
    return "\n".join(
        F"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 98 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ), )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.", )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ), )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set", )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner", )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ), )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ), )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration", )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True)

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )
if __name__ == "__main__":
main()
| 23 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("""You have to specify either text, visual prompt or images.""")

        if text is not None and visual_prompt is not None:
            raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                """pixel_values""": image_features.pixel_values,
                """conditional_pixel_values""": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                """conditional_pixel_values""": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", FutureWarning, )
        return self.image_processor
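
# A minimal usage sketch (commented out, since it requires network access). The
# processor pairs a ViT image processor with a CLIP tokenizer, as declared above:
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
#     # -> dict with input_ids, attention_mask and pixel_values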
| 99 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    """simple docstring"""

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFMBartForConditionalGeneration,
            """feature-extraction""": TFMBartModel,
            """summarization""": TFMBartForConditionalGeneration,
            """text2text-generation""": TFMBartForConditionalGeneration,
            """translation""": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
    ]
    expected_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
    ]
    model_name = """facebook/mbart-large-en-ro"""

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 23 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """audio-spectrogram-transformer"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
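
# A short illustration (commented out) of how the patch grid follows from the
# defaults above: with 16x16 patches and strides of 10 in frequency and time
# over a (num_mel_bins x max_length) = (128 x 1024) spectrogram, the model sees
# a 12 x 101 grid, i.e. 1212 patches.
#
#     config = ASTConfig()
#     config.model_type  # 'audio-spectrogram-transformer'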
| 100 |
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
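

# A quick check of the helpers above: 10! = 3628800 and 3+6+2+8+8+0+0 = 27.
assert solution(10) == 27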
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 23 | 0 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 101 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")
class VideoMAEImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["""pixel_values"""]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
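
# A minimal usage sketch for the processor above (commented out; the class name
# itself is a reconstruction from this dump). A "video" is a list of frames:
#
#     processor = VideoMAEImageProcessor()
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#     out = processor(video, return_tensors="np")
#     out["pixel_values"].shape  # expected (1, 8, 3, 224, 224)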
| 23 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
__magic_name__ : List[Any] = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig ):
"""simple docstring"""
    model_type = """canine"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_6_3_8_4 , type_vocab_size=1_6 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=0 , bos_token_id=0xE_000 , eos_token_id=0xE_001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=1_6_3_8_4 , local_transformer_stride=1_2_8 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
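# Editorial usage sketch: instantiating the configuration above and checking one of the
# character-level hyperparameters it stores.
from transformers import CanineConfig, CanineModel

config = CanineConfig()                 # the defaults shown in __init__ above
assert config.downsampling_rate == 4    # characters pooled per downsampled position
model = CanineModel(config)             # randomly initialised CANINE encoder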
| 102 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput ):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps = 2000 , snr = 0.1_5 , sigma_min = 0.0_1 , sigma_max = 1_3_4_8.0 , sampling_eps = 1e-5 , correct_steps = 1 , ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )

    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample

    def set_timesteps( self , num_inference_steps , sampling_eps = None , device = None ) -> None:
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device )

    def set_sigmas( self , num_inference_steps , sigma_min = None , sigma_max = None , sampling_eps = None ) -> None:
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps , sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) , math.log(sigma_max ) , num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def get_adjacent_sigma( self , timesteps , t ) -> torch.FloatTensor:
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )

    def step_pred( self , model_output , timestep , sample , generator = None , return_dict = True , ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        timestep = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps , timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape , layout=sample.layout , generator=generator , device=sample.device , dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample , prev_sample_mean=prev_sample_mean )

    def step_correct( self , model_output , sample , generator = None , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__( self ) -> int:
        return self.config.num_train_timesteps
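# Editorial usage sketch: the predictor-corrector loop this scheduler is designed for.
# step_correct applies the Langevin-dynamics correction, step_pred the reverse-SDE
# predictor. The score estimate below is a placeholder; a trained score network goes there.
import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    score = -sample  # placeholder for model(sample, t); not a real score estimate
    for _ in range(scheduler.config.correct_steps):
        sample = scheduler.step_correct(score, sample).prev_sample
    sample = scheduler.step_pred(score, t, sample).prev_sample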
| 23 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('''RGB''' ) ) for img in image]
    image = torch.stack(image )
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )

    def check_inputs( self , strength ):
        """simple docstring"""
        if strength < 0 or strength > 1:
            raise ValueError(f"""The value of strength should be in [0.0, 1.0] but is {strength}""" )

    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('''add noise to latents at timestep''' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents

    @torch.no_grad()
    def __call__( self , image: Union[torch.FloatTensor, PIL.Image.Image] = None , strength: float = 0.8 , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        """simple docstring"""
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
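# Editorial usage sketch for the image-to-image DDIM pipeline above. The checkpoint id is
# illustrative (any unconditional DDPM/DDIM checkpoint with a matching UNet would do), and
# "input.png" is a placeholder path.
import PIL.Image
from diffusers import DDPMPipeline

src = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256")  # provides unet + scheduler
pipe = DDIMNoiseComparativeAnalysisPipeline(src.unet, src.scheduler)
init_image = PIL.Image.open("input.png").convert("RGB").resize((256, 256))
result = pipe(image=init_image, strength=0.7, num_inference_steps=50)
result.images[0].save("output.png")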
| 103 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
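# Editorial sketch (not the actual transformers implementation): the `_LazyModule`
# registered in `sys.modules` above defers importing torch-heavy submodules until an
# attribute is first requested. The idea, reduced to a few lines:
import importlib
import sys
import types

class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the owning submodule only on first access to one of its names
        for submodule, names in self._import_structure.items():
            if attr in names:
                real = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(real, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")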
| 23 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
UpperCamelCase = {"""vinai/bartpho-syllable""": 1024}
class BartphoTokenizer(PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , "r" , encoding="utf-8" ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self ) -> int:
return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"""{str(token )} \n""" )
        return out_vocab_file, out_monolingual_vocab_file
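# Editorial usage sketch: loading the syllable-level BARTpho tokenizer defined above from
# the Hub checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP (assumes network access).
from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
print(tokenizer.decode(ids, skip_special_tokens=True))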
| 104 |
import datasets
from .evaluate import evaluate
snake_case__ : int = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
snake_case__ : Union[str, Any] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
snake_case__ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
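# Editorial note: `datasets.load_metric` (shown in the docstring example above) is
# deprecated in recent releases of datasets; assuming the metric is mirrored on the
# evaluate hub, the same scores can be obtained through the evaluate library.
import evaluate as _evaluate_lib  # aliased to avoid clashing with `evaluate` imported above

cuad_metric = _evaluate_lib.load("cuad")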
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder ):
    """simple docstring"""

    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )


class NestedBeamDataset(datasets.BeamBasedBuilder ):
    """simple docstring"""

    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
        ]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
def get_test_nested_examples():
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class BeamBuilderTest(TestCase ):
"""simple docstring"""
@require_beam
    def test_download_and_prepare( self ):
UpperCamelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
UpperCamelCase_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
    def test_download_and_prepare_sharded( self ):
import apache_beam as beam
UpperCamelCase_ = beam.io.parquetio.WriteToParquet
UpperCamelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
UpperCamelCase_ = partial(_UpperCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        _UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
UpperCamelCase_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
    def test_no_beam_options( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
    def test_nested_features( self ):
UpperCamelCase_ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
UpperCamelCase_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
| 23 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__snake_case :Union[str, Any] =logging.get_logger(__name__)
__snake_case :Dict ='The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( lowerCAmelCase__ : str , pytorch_dump_folder_path: str ) -> None:
'''simple docstring'''
A = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
A = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
A = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCAmelCase__ , output_all_encodings=lowerCAmelCase__ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCAmelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
A = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
A = os.path.join(get_home_dir() , 'models' )
A = _load_vocab(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , cls=lowerCAmelCase__ )
A = nlp.model.BERTModel(
lowerCAmelCase__ , len(lowerCAmelCase__ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCAmelCase__ , use_token_type_embed=lowerCAmelCase__ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCAmelCase__ , use_decoder=lowerCAmelCase__ , )
original_bort.load_parameters(lowerCAmelCase__ , cast_dtype=lowerCAmelCase__ , ignore_extra=lowerCAmelCase__ )
A = original_bort._collect_params_with_prefix()
# Build our config 🤗
A = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(lowerCAmelCase__ ),
}
A = BertConfig.from_dict(lowerCAmelCase__ )
A = BertForMaskedLM(lowerCAmelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
A = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
A = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
A = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
A = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
A = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
A = hf_bort_model.bert.encoder.layer[i]
# self attention
A = layer.attention.self
A = check_and_map_params(
self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
A = check_and_map_params(
self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
A = check_and_map_params(
self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
A = check_and_map_params(
self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
A = check_and_map_params(
self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
A = check_and_map_params(
self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
A = layer.attention.output
A = check_and_map_params(
self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' )
A = check_and_map_params(
self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' )
A = check_and_map_params(
self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
A = check_and_map_params(
self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
A = layer.intermediate
A = check_and_map_params(
intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
A = check_and_map_params(
intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
A = layer.output
A = check_and_map_params(
bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
A = check_and_map_params(
bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
A = check_and_map_params(
bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
A = check_and_map_params(
bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
A = RobertaTokenizer.from_pretrained('roberta-base' )
A = tokenizer.encode_plus(lowerCAmelCase__ )['input_ids']
# Get gluon output
A = mx.nd.array([input_ids] )
A = original_bort(inputs=lowerCAmelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    A = BertModel.from_pretrained(pytorch_dump_folder_path )
hf_bort_model.eval()
A = tokenizer.encode_plus(lowerCAmelCase__ , return_tensors='pt' )
A = hf_bort_model(**lowerCAmelCase__ )[0]
A = output_gluon[0].asnumpy()
A = output_hf[0].detach().numpy()
A = np.max(np.abs(hf_layer - gluon_layer ) ).item()
A = np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , lowerCAmelCase__ )
if __name__ == "__main__":
__snake_case :str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__snake_case :Any =parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 106 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , albert_config_file , pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
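# Example invocation (editorial addition; the script name and all paths are placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin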
| 23 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nezha'''] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 107 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
    def test_finetune_bert2bert( self ):
UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCamelCase_ = bertabert.config.encoder.vocab_size
UpperCamelCase_ = tokenizer.sep_token_id
UpperCamelCase_ = tokenizer.cls_token_id
UpperCamelCase_ = 128
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
UpperCamelCase_ = train_dataset.select(range(32 ) )
UpperCamelCase_ = val_dataset.select(range(16 ) )
UpperCamelCase_ = 4
def _map_to_encoder_decoder_inputs(_UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 )
UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 )
UpperCamelCase_ = inputs.input_ids
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = outputs.input_ids
UpperCamelCase_ = outputs.input_ids.copy()
UpperCamelCase_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
UpperCamelCase_ = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_UpperCAmelCase ):
UpperCamelCase_ = pred.label_ids
UpperCamelCase_ = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
UpperCamelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase_ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
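# Editorial note: the `-100` substitution in `_map_to_encoder_decoder_inputs` above relies
# on PyTorch's cross-entropy loss ignoring that index by default, so padding positions do
# not contribute to the training signal. A minimal demonstration:
import torch

logits = torch.randn(3, 5)              # 3 token positions, vocabulary of 5
labels = torch.tensor([2, -100, 4])     # middle position is padding -> ignored
loss = torch.nn.functional.cross_entropy(logits, labels)  # averaged over the 2 real tokens only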
| 23 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]=13 , lowerCamelCase : Dict=32 , lowerCamelCase : Optional[int]=3 , lowerCamelCase : List[Any]=4 , lowerCamelCase : Tuple=[10, 20, 30, 40] , lowerCamelCase : str=[2, 2, 3, 2] , lowerCamelCase : Optional[int]=True , lowerCamelCase : List[Any]=True , lowerCamelCase : Union[str, Any]=37 , lowerCamelCase : Any="gelu" , lowerCamelCase : Tuple=10 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"] , lowerCamelCase : Any=[2, 3, 4] , lowerCamelCase : Tuple=None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_stages
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = initializer_range
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
_UpperCAmelCase = scope
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ) -> str:
"""simple docstring"""
_UpperCAmelCase = ConvNextModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_UpperCAmelCase = model(lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase = ConvNextForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_UpperCAmelCase = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = ConvNextBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_UpperCAmelCase = model(lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCAmelCase = None
_UpperCAmelCase = ConvNextBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_UpperCAmelCase = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase = ConvNextModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def lowerCamelCase ( self : int ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def lowerCamelCase ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def lowerCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(lowerCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def lowerCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
def lowerCamelCase ( self : int ) -> Any:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ):
_UpperCAmelCase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = ConvNextModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(lowerCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**lowerCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
_UpperCAmelCase = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else ()
_lowerCamelCase = ConvNextConfig
_lowerCamelCase = False
def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = ConvNextModelTester(self ) | 108 |
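# To run just the tests above locally, the usual transformers invocation applies
# (the test path below assumes the standard repo layout, so treat it as a sketch):
#   python -m pytest tests/models/convnext/test_modeling_convnext.py -rA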
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
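# A minimal launch sketch for this example (the script filename below is a
# placeholder; any of the setups listed above are driven the same way once
# `accelerate config` has been run):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16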
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
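# For reference, a plain-PyTorch sketch of what `accelerator.accumulate` handles
# above (illustrative only; the names mirror the training loop in this file):
#
#     for step, batch in enumerate(train_dataloader):
#         loss = model(**batch).loss / gradient_accumulation_steps
#         loss.backward()
#         if (step + 1) % gradient_accumulation_steps == 0:
#             optimizer.step()
#             lr_scheduler.step()
#             optimizer.zero_grad()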
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
a = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10,
        num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_training = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
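# Note: the integration tests above are decorated with @slow, so they are skipped
# unless slow tests are opted in via the usual transformers convention, e.g.:
#   RUN_SLOW=1 python -m pytest tests/models/mask2former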
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase ( _snake_case ,_snake_case=None ):
UpperCAmelCase__ : int = None
if token is not None:
UpperCAmelCase__ : Dict = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
UpperCAmelCase__ : Optional[Any] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
UpperCAmelCase__ : Union[str, Any] = requests.get(_snake_case ,headers=_snake_case ).json()
UpperCAmelCase__ : str = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
UpperCAmelCase__ : List[Any] = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_snake_case ):
UpperCAmelCase__ : Dict = requests.get(url + F'''&page={i + 2}''' ,headers=_snake_case ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase ( _snake_case ,_snake_case=None ):
UpperCAmelCase__ : Dict = None
if token is not None:
UpperCAmelCase__ : Optional[int] = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
UpperCAmelCase__ : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
UpperCAmelCase__ : Dict = requests.get(_snake_case ,headers=_snake_case ).json()
UpperCAmelCase__ : Optional[int] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
UpperCAmelCase__ : Any = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_snake_case ):
UpperCAmelCase__ : str = requests.get(url + F'''&page={i + 2}''' ,headers=_snake_case ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase__ : Optional[int] = None
if token is not None:
UpperCAmelCase__ : str = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
UpperCAmelCase__ : Optional[Any] = requests.get(_snake_case ,headers=_snake_case ,allow_redirects=_snake_case )
UpperCAmelCase__ : str = result.headers['Location']
UpperCAmelCase__ : int = requests.get(_snake_case ,allow_redirects=_snake_case )
UpperCAmelCase__ : Tuple = os.path.join(_snake_case ,F'''{artifact_name}.zip''' )
with open(_snake_case ,'wb' ) as fp:
fp.write(response.content )
def lowerCamelCase ( _snake_case ,_snake_case=None ):
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : str = []
UpperCAmelCase__ : List[str] = None
with zipfile.ZipFile(_snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(_snake_case ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_snake_case ) as f:
for line in f:
UpperCAmelCase__ : List[Any] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCAmelCase__ : List[Any] = line[: line.index(': ' )]
UpperCAmelCase__ : str = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
UpperCAmelCase__ : Dict = line[len('FAILED ' ) :]
failed_tests.append(_snake_case )
elif filename == "job_name.txt":
UpperCAmelCase__ : int = line
if len(_snake_case ) != len(_snake_case ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(_snake_case )} for `errors` '''
F'''and {len(_snake_case )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
' problem.' )
UpperCAmelCase__ : int = None
if job_name and job_links:
UpperCAmelCase__ : str = job_links.get(_snake_case ,_snake_case )
# A list with elements of the form (line of error, error, failed test)
UpperCAmelCase__ : List[str] = [x + [y] + [job_link] for x, y in zip(_snake_case ,_snake_case )]
return result
def lowerCamelCase ( _snake_case ,_snake_case=None ):
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Any = [os.path.join(_snake_case ,_snake_case ) for p in os.listdir(_snake_case ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_snake_case ,job_links=_snake_case ) )
return errors
def lowerCamelCase ( _snake_case ,_snake_case=None ):
UpperCAmelCase__ : List[str] = Counter()
counter.update([x[1] for x in logs] )
UpperCAmelCase__ : List[str] = counter.most_common()
UpperCAmelCase__ : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCAmelCase__ : int = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
UpperCAmelCase__ : List[str] = dict(sorted(r.items() ,key=lambda _snake_case : item[1]["count"] ,reverse=_snake_case ) )
return r
def lowerCamelCase ( _snake_case ):
UpperCAmelCase__ : Optional[int] = test.split('::' )[0]
if test.startswith('tests/models/' ):
UpperCAmelCase__ : List[Any] = test.split('/' )[2]
else:
UpperCAmelCase__ : List[Any] = None
return test
def lowerCamelCase ( _snake_case ,_snake_case=None ):
UpperCAmelCase__ : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
UpperCAmelCase__ : int = [x for x in logs if x[2] is not None]
UpperCAmelCase__ : Optional[Any] = {x[2] for x in logs}
UpperCAmelCase__ : Optional[Any] = {}
for test in tests:
UpperCAmelCase__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
UpperCAmelCase__ : Any = counter.most_common()
UpperCAmelCase__ : List[Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCAmelCase__ : str = sum(error_counts.values() )
if n_errors > 0:
UpperCAmelCase__ : Union[str, Any] = {'count': n_errors, 'errors': error_counts}
UpperCAmelCase__ : List[str] = dict(sorted(r.items() ,key=lambda _snake_case : item[1]["count"] ,reverse=_snake_case ) )
return r
def lowerCamelCase ( _snake_case ):
UpperCAmelCase__ : Tuple = '| no. | error | status |'
UpperCAmelCase__ : int = '|-:|:-|:-|'
UpperCAmelCase__ : Optional[Any] = [header, sep]
for error in reduced_by_error:
UpperCAmelCase__ : Optional[Any] = reduced_by_error[error]['count']
UpperCAmelCase__ : Union[str, Any] = F'''| {count} | {error[:100]} | |'''
lines.append(_snake_case )
return "\n".join(_snake_case )
def lowerCamelCase ( _snake_case ):
UpperCAmelCase__ : List[str] = '| model | no. of errors | major error | count |'
UpperCAmelCase__ : Any = '|-:|-:|-:|-:|'
UpperCAmelCase__ : Any = [header, sep]
for model in reduced_by_model:
UpperCAmelCase__ : Union[str, Any] = reduced_by_model[model]['count']
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = list(reduced_by_model[model]['errors'].items() )[0]
UpperCAmelCase__ : List[str] = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(_snake_case )
return "\n".join(_snake_case )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
UpperCamelCase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCamelCase__ = get_job_links(args.workflow_run_id, token=args.token)
UpperCamelCase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCamelCase__ = k.find(' / ')
UpperCamelCase__ = k[index + len(' / ') :]
UpperCamelCase__ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCamelCase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCamelCase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCamelCase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ = reduce_by_error(errors)
UpperCamelCase__ = reduce_by_model(errors)
UpperCamelCase__ = make_github_table(reduced_by_error)
UpperCamelCase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
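# Example invocation (the run id and token are placeholders; the filename assumes
# this script lives at utils/get_ci_error_statistics.py in the transformers repo):
#   python utils/get_ci_error_statistics.py --workflow_run_id 1234567890 \
#       --output_dir ci_errors --token "$GITHUB_TOKEN"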
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
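# A minimal composition sketch using the config class above (the model
# identifiers are illustrative, not prescribed by this file):
#   from transformers import AutoConfig
#   encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
#   decoder_config = AutoConfig.from_pretrained("gpt2")
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)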
"""Convert a decimal integer to a binary string, recursively."""


def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return a '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
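# Worked examples for the functions above:
#   binary_recursive(5) -> "101"    (since 5 = 2 * 2 + 1 and 2 = 2 * 1 + 0)
#   main("-12")         -> "-0b1100"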
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
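# Example invocation (paths are placeholders for a downloaded MobileBERT
# TensorFlow checkpoint; the script filename is assumed):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin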
"""Similarity search: find, for each query vector, the closest vector in a dataset."""
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors of equal length."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in `value_array`, return the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            # keep the smallest distance seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine of the angle between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
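# A small usage sketch (values are illustrative):
#   dataset = np.array([[0, 0], [1, 1], [2, 2]])
#   value_array = np.array([[0, 1]])
#   similarity_search(dataset, value_array)  # -> [[[0, 0], 1.0]]
#   cosine_similarity(np.array([1, 0]), np.array([0, 1]))  # -> 0.0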
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
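# For orientation: a fill-mask pipeline returns, per mask token, a list of
# candidate dicts shaped like the assertions below (scores are illustrative):
#   unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base")
#   unmasker("My name is <mask>")
#   # -> [{"sequence": ..., "score": ..., "token": ..., "token_str": ...}, ...]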
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs_with_top_k = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs_with_top_k , [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs_with_top_k ) )
    def run_test_top_k_targets( self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=targets )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets_from_outputs = [el['token_str'] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets_from_outputs ).issubset(vocab ):
            unmasked_targets = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=targets_from_outputs )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(unmasked_targets ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=targets , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # unique results than there are unique targets
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs , [
            [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ],
            [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ],
            [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ],
            ] , )
| 23 | 0 |
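# A minimal, hedged sketch of the two FillMaskPipeline behaviors exercised in
# the tests above (top_k and targets). The checkpoint name is illustrative;
# any masked-LM checkpoint works. Note the leading space in targets for
# BPE-based vocabularies such as RoBERTa's.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
mask = unmasker.tokenizer.mask_token
# top_k caps the number of candidates returned for the mask.
print(unmasker(f"Paris is the {mask} of France.", top_k=2))
# targets restricts scoring to the given vocabulary strings.
print(unmasker(f"Paris is the {mask} of France.", targets=[" capital", " heart"]))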
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''' , do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
| 171 |
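# Quick illustration of the [CLS]/[SEP] layout and token_type_ids that the
# two methods above produce for a sentence pair; a sketch assuming the public
# uncased checkpoint is reachable.
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("how are you", "i am fine")
print(enc["input_ids"])       # [CLS] sentence A [SEP] sentence B [SEP]
print(enc["token_type_ids"])  # 0s over [CLS] A [SEP], then 1s over B [SEP]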
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '.'
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '.'
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2_non_square( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '.'
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 23 | 0 |
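# Sketch of the slow-test path above outside unittest; sag_scale turns on
# self-attention guidance on top of classifier-free guidance. Checkpoint and
# device are illustrative assumptions.
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
generator = torch.manual_seed(0)  # fixed seed so output slices are reproducible
image = pipe(
    "a photo of an astronaut", sag_scale=1.0, guidance_scale=7.5,
    num_inference_steps=20, generator=generator,
).images[0]
image.save("sag.png")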
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer( ModelMixin , ConfigMixin ):
    '''
    Holds the mean and standard deviation of a CLIP embedder and uses them to
    scale (normalize) and unscale (denormalize) image embeddings.
    '''
    @register_to_config
    def __init__( self ,embedding_dim = 768 ,):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 ,embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 ,embedding_dim ) )
    def to( self ,torch_device = None ,torch_dtype = None ,):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self ,embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self ,embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 36 |
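# Round-trip check for the normalizer above (using the de-obfuscated name
# StableUnCLIPImageNormalizer): unscale(scale(x)) should recover x up to
# floating-point error, since the two maps are exact inverses.
import torch

norm = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(2, 768)
assert torch.allclose(norm.unscale(norm.scale(embeds)), embeds, atol=1e-5)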
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("""T""")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue ( Generic[T] ):
    """Min-heap priority queue with a position map for efficient key updates."""
    def __init__( self ) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__( self ) -> int:
        return self.elements
    def __repr__( self ) -> str:
        return str(self.heap )
    def is_empty( self ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push( self , elem: T , weight: int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def extract_min( self ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem , _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem , _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def update_key( self , elem: T , weight: int ) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _ , parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def _bubble_up( self , elem: T ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _ , weight = self.heap[curr_pos]
        _ , parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None
    def _bubble_down( self , elem: T ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _ , weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
            else:
                return None
        if child_right_position < self.elements:
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None
    def _swap_nodes( self , node1_pos: int , node2_pos: int ) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos] , self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted ( Generic[T] ):
    """Undirected weighted graph stored as an adjacency dictionary."""
    def __init__( self ) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__( self ) -> str:
        return str(self.connections )
    def __len__( self ) -> int:
        return self.nodes
    def add_node( self , node: T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge( self , node1: T , node2: T , weight: int ) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T] , ):
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 23 | 0 |
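# Tiny end-to-end run of the Prim implementation above (assuming the
# de-obfuscated names GraphUndirectedWeighted and prims_algo); the parent map
# encodes the spanning-tree edges.
graph = GraphUndirectedWeighted[int]()
graph.add_edge(1, 2, 1)
graph.add_edge(2, 3, 2)
graph.add_edge(1, 3, 4)
dist, parent = prims_algo(graph)
print(parent)  # e.g. {1: None, 2: 1, 3: 2}: tree edges (1, 2) and (2, 3)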
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList , dest_layers: nn.ModuleList , layers_to_copy: List[int] ):
    """Copy the selected src layers into dest_layers, in place."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F'{len(dest_layers )} != {len(layers_to_copy )}'
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int , n_teacher: int ):
    """Choose which teacher layers to copy for a student of the given depth."""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
                F' {n_student}' )
        return list(range(n_student ) )
def get_layers_to_supervise(n_student: int , n_teacher: int ) -> List[int]:
    """Pick which teacher layers should supervise each student layer."""
    if n_student > n_teacher:
        raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _lowerCAmelCase (_lowercase , _lowercase = "student" , _lowercase = None , _lowercase = None , _lowercase=False , _lowercase=None , _lowercase=None , **_lowercase , ):
"""simple docstring"""
a__ = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(__lowercase , __lowercase ):
AutoTokenizer.from_pretrained(__lowercase ).save_pretrained(__lowercase ) # purely for convenience
a__ = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ).eval()
else:
assert isinstance(__lowercase , __lowercase ), F'teacher must be a model or string got type {type(__lowercase )}'
a__ = teacher.config.to_diff_dict()
try:
a__ , a__ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
a__ = teacher_e
if d is None:
a__ = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
a__ , a__ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
a__ , a__ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
a__ = teacher_e
if d is None:
a__ = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowercase )
# Copy weights
a__ = teacher.config_class(**__lowercase )
a__ = AutoModelForSeqaSeqLM.from_config(__lowercase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
a__ = student.load_state_dict(teacher.state_dict() , strict=__lowercase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
a__ , a__ = list(range(__lowercase ) ), list(range(__lowercase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(__lowercase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
a__ = pick_layers_to_copy(__lowercase , __lowercase )
if d_layers_to_copy is None:
a__ = pick_layers_to_copy(__lowercase , __lowercase )
try:
if hasattr(
__lowercase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowercase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowercase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowercase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowercase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowercase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowercase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
a__ = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(__lowercase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 331 |
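# Spot-check of the hardcoded copy schedules above: distilling a 12-layer
# teacher into 3 student layers keeps the first, middle and last layers.
print(pick_layers_to_copy(n_student=3, n_teacher=12))      # [0, 6, 11]
print(get_layers_to_supervise(n_student=3, n_teacher=12))  # [3, 7, 11]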
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case__ : Dict = TypeVar("""T""")
class LRUCache ( Generic[T] ):
    """Page-replacement cache that evicts the least recently used key."""
    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n: int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x: T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ) -> None:
        for k in self.dq_store:
            print(k )
    def __repr__( self ) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 23 | 0 |
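# For comparison, a sketch of the same least-recently-used policy built on
# collections.OrderedDict, which keeps insertion order and supports O(1)
# move-to-end and eviction from the front.
from collections import OrderedDict

class OrderedDictLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)      # mark as most recently used
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)   # evict the least recently used
        self.store[key] = None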
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 326 |
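# Usage sketch: each branch above returns a ready-to-call torch module.
import torch

act = get_activation("swish")
print(act(torch.tensor([-1.0, 0.0, 1.0])))  # SiLU(x) = x * sigmoid(x)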
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 0 |
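# The naive form above can overflow np.exp for large negative inputs; a common
# numerically stable variant (my addition, not part of the file above)
# evaluates an equivalent expression per sign, so np.exp never sees a large
# positive argument.
import numpy as np

def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    out = np.empty_like(vector, dtype=float)
    pos = vector >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-vector[pos]))
    exp_v = np.exp(vector[~pos])
    out[~pos] = exp_v / (1.0 + exp_v)
    return out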
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest ( unittest.TestCase ):
    @slow
    def test_bert_from_pretrained( self ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , BertConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxBertModel )
    @slow
    def test_roberta_from_pretrained( self ):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , RobertaConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxRobertaModel )
    @slow
    def test_bert_jax_jit( self ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    @slow
    def test_roberta_jax_jit( self ):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , """bert-base is not a local folder and is not a valid model identifier""" ):
            _ = FlaxAutoModel.from_pretrained("""bert-base""" )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )
    def test_model_file_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
            _ = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
    def test_model_from_pt_suggestion( self ):
        with self.assertRaisesRegex(EnvironmentError , """Use `from_pt=True` to load this model""" ):
            _ = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 169 |
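# The jit pattern from the tests above in isolation; jax.block_until_ready
# forces JAX's async dispatch to finish so the call can be timed or inspected.
# Checkpoint name is illustrative.
import jax
from transformers import AutoTokenizer, FlaxBertModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxBertModel.from_pretrained("bert-base-cased")
tokens = tokenizer("Do you support jax jitted function?", return_tensors="jax")

@jax.jit
def forward(**kwargs):
    return model(**kwargs)

jax.block_until_ready(forward(**tokens))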
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18)
    else:
        return datetime(year , 3 , 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
snake_case__ : Dict = """will be""" if year > datetime.now().year else """was"""
print(f'Easter in {year} {tense} {gauss_easter(year)}')
| 23 | 0 |
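# Spot-checks against known dates (assuming the fixed gauss_easter above is
# in scope): Easter 2021 fell on April 4 and Easter 2023 on April 9.
from datetime import datetime

assert gauss_easter(2021) == datetime(2021, 4, 4)
assert gauss_easter(2023) == datetime(2023, 4, 9)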
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list) - 1 , 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list) - 1 , 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
SCREAMING_SNAKE_CASE_ = [int(x) for x in input().split()]
# inputing elements of the list in one line
SCREAMING_SNAKE_CASE_ = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 597 |
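# Example run of the brick (odd-even) sort above; the worst case is O(n^2)
# like bubble sort, but the even and odd phases are independently
# parallelizable.
print(odd_even_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]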
import requests
def send_slack_message(message_body: str , slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers)
    if response.status_code != 200:
        msg_error = (
            'Request to slack returned an error '
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg_error)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 23 | 0 |
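# Variant with an explicit timeout, since requests.post waits indefinitely by
# default; otherwise identical in spirit to the helper above.
import requests

def send_slack_message_with_timeout(message_body: str, slack_url: str, timeout: float = 10.0) -> None:
    response = requests.post(
        slack_url,
        json={"text": message_body},
        headers={"Content-Type": "application/json"},
        timeout=timeout,  # seconds before a Timeout exception is raised
    )
    response.raise_for_status()  # turn HTTP error codes into exceptions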
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
    def test_text_to_video_default_case( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False , expected_max_diff=3E-3 )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=1E-2 )
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def test_inference_batch_consistent( self ):
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def test_inference_batch_single_identical( self ):
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
    def test_num_images_per_prompt( self ):
        pass
    def test_progress_bar( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class A_ ( unittest.TestCase ):
    def test_full_model( self ):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to("cuda" )
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type="pt" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
    def test_two_step_model( self ):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        pipe = pipe.to("cuda" )
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type="pt" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
| 197 |
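# Sketch of the slow-test path above as a standalone script; export_to_video
# is a diffusers helper, though the exact frame format it expects varies a
# bit across diffusers versions.
import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline
from diffusers.utils import export_to_video

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
frames = pipe("Spiderman is surfing", num_inference_steps=25).frames
export_to_video(frames, "spiderman.mp4")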
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts ( TestCase ):
    """Lint-style checks over the dataset scripts under ./datasets."""
    def _no_encoding_on_file_open( self , file_path: str ):
        r"""Find instances where a non-binary file is opened without an explicit utf-8 encoding."""
        with open(file_path , encoding='utf-8' ) as input_file:
            regexp = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements( self , file_path: str ):
        r"""Find print statements that are not inside comments or docstrings."""
        with open(file_path , encoding='utf-8' ) as input_file:
            regexp = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ):
        dataset_paths = Path('./datasets' )
        dataset_files = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def test_no_print_statements( self ):
        dataset_paths = Path('./datasets' )
        dataset_files = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 23 | 0 |
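# The first lint regex above in isolation: it flags open(...) calls on lines
# that never mention an encoding keyword or a binary/write mode string.
import re

pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert pattern.search(' data = open("data.txt")')
assert not pattern.search(' data = open("data.txt", encoding="utf-8")')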
import pickle
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Optional[int] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple=0.2 , __a : List[str]=0.2 ) ->List[Any]:
lowerCamelCase_ : str = bp_numa
lowerCamelCase_ : Tuple = bp_numa
lowerCamelCase_ : Optional[int] = bp_numa
lowerCamelCase_ : Optional[Any] = conva_get[:2]
lowerCamelCase_ : Dict = conva_get[2]
lowerCamelCase_ : Union[str, Any] = size_pa
lowerCamelCase_ : Optional[Any] = rate_w
lowerCamelCase_ : Any = rate_t
lowerCamelCase_ : Optional[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCamelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase_ : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowerCamelCase_ : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
lowerCamelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _lowerCAmelCase ( self : Optional[Any] , __a : Tuple ) ->int:
# save model dict with pickle
lowerCamelCase_ : Any = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(_UpperCAmelCase , """wb""" ) as f:
pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F'''Model saved: {save_path}''' )
@classmethod
def _lowerCAmelCase ( cls : List[str] , __a : Union[str, Any] ) ->Optional[Any]:
# read saved model
with open(_UpperCAmelCase , """rb""" ) as f:
lowerCamelCase_ : Optional[Any] = pickle.load(_UpperCAmelCase ) # noqa: S301
lowerCamelCase_ : Tuple = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
lowerCamelCase_ : Union[str, Any] = model_dic.get("""size_pooling1""" )
lowerCamelCase_ : Optional[int] = model_dic.get("""num_bp1""" )
lowerCamelCase_ : str = model_dic.get("""num_bp2""" )
lowerCamelCase_ : Optional[Any] = model_dic.get("""num_bp3""" )
lowerCamelCase_ : Any = model_dic.get("""rate_weight""" )
lowerCamelCase_ : str = model_dic.get("""rate_thre""" )
# create model instance
lowerCamelCase_ : str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# modify model parameter
lowerCamelCase_ : str = model_dic.get("""w_conv1""" )
lowerCamelCase_ : Optional[Any] = model_dic.get("""wkj""" )
lowerCamelCase_ : int = model_dic.get("""vji""" )
lowerCamelCase_ : Any = model_dic.get("""thre_conv1""" )
lowerCamelCase_ : Tuple = model_dic.get("""thre_bp2""" )
lowerCamelCase_ : int = model_dic.get("""thre_bp3""" )
return conv_ins
def _lowerCAmelCase ( self : Optional[Any] , __a : List[str] ) ->Tuple:
return 1 / (1 + np.exp(-1 * x ))
def _lowerCAmelCase ( self : str , __a : Optional[int] ) ->str:
return round(_UpperCAmelCase , 3 )
def _lowerCAmelCase ( self : Dict , __a : Optional[Any] , __a : Any , __a : int , __a : List[str] , __a : int ) ->Optional[int]:
# convolution process
lowerCamelCase_ : Optional[Any] = convs[0]
lowerCamelCase_ : List[str] = convs[1]
lowerCamelCase_ : Any = np.shape(_UpperCAmelCase )[0]
# get the data slice of original image data, data_focus
lowerCamelCase_ : Union[str, Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
lowerCamelCase_ : Any = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_UpperCAmelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
lowerCamelCase_ : Any = []
lowerCamelCase_ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_UpperCAmelCase ):
lowerCamelCase_ : List[str] = []
for i_focus in range(len(_UpperCAmelCase ) ):
lowerCamelCase_ : Union[str, Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_UpperCAmelCase ) )
lowerCamelCase_ : Union[str, Any] = np.asmatrix(_UpperCAmelCase ).reshape(
_UpperCAmelCase , _UpperCAmelCase )
data_featuremap.append(_UpperCAmelCase )
        # expanding the data slice to one dimension
lowerCamelCase_ : Union[str, Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) )
lowerCamelCase_ : Dict = np.asarray(_UpperCAmelCase )
return focus_list, data_featuremap
def _lowerCAmelCase ( self : int , __a : Optional[int] , __a : Union[str, Any] , __a : List[str]="average_pool" ) ->Tuple:
# pooling process
lowerCamelCase_ : Optional[int] = len(featuremaps[0] )
lowerCamelCase_ : Union[str, Any] = int(size_map / size_pooling )
lowerCamelCase_ : Any = []
for i_map in range(len(_UpperCAmelCase ) ):
lowerCamelCase_ : List[Any] = featuremaps[i_map]
lowerCamelCase_ : int = []
for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase_ : int = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_UpperCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_UpperCAmelCase ) )
lowerCamelCase_ : List[str] = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase )
featuremap_pooled.append(_UpperCAmelCase )
return featuremap_pooled
def _lowerCAmelCase ( self : Optional[int] , __a : Tuple ) ->Union[str, Any]:
# expanding three dimension data to one dimension list
lowerCamelCase_ : Optional[int] = []
for i in range(len(_UpperCAmelCase ) ):
lowerCamelCase_ : Dict = np.shape(data[i] )
lowerCamelCase_ : Any = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCamelCase_ : Union[str, Any] = data_listed.getA().tolist()[0]
data_expanded.extend(_UpperCAmelCase )
lowerCamelCase_ : List[str] = np.asarray(_UpperCAmelCase )
return data_expanded
def _lowerCAmelCase ( self : List[Any] , __a : str ) ->List[str]:
# expanding matrix to one dimension list
lowerCamelCase_ : List[Any] = np.asarray(_UpperCAmelCase )
lowerCamelCase_ : Dict = np.shape(_UpperCAmelCase )
lowerCamelCase_ : List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _lowerCAmelCase ( self : Optional[int] , __a : Optional[Any] , __a : str , __a : Dict , __a : Optional[Any] , __a : Any ) ->Union[str, Any]:
lowerCamelCase_ : str = []
lowerCamelCase_ : Tuple = 0
for i_map in range(_UpperCAmelCase ):
lowerCamelCase_ : Optional[int] = np.ones((size_map, size_map) )
for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase_ : int = pd_pool[
i_pool
]
lowerCamelCase_ : List[Any] = i_pool + 1
lowerCamelCase_ : Optional[Any] = np.multiply(
_UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_UpperCAmelCase )
return pd_all
def _lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : str , __a : int , __a : Optional[Any] , __a : Any , __a : Tuple=bool ) ->Any:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(_UpperCAmelCase )) )
print((""" - - Shape: Teach_Data """, np.shape(_UpperCAmelCase )) )
lowerCamelCase_ : List[str] = 0
lowerCamelCase_ : Union[str, Any] = []
lowerCamelCase_ : Any = 10_000
while rp < n_repeat and mse >= error_accuracy:
lowerCamelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(_UpperCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCamelCase_ : str = np.asmatrix(datas_train[p] )
lowerCamelCase_ : Tuple = np.asarray(datas_teach[p] )
lowerCamelCase_, lowerCamelCase_ : int = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase_ : str = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowerCamelCase_ : int = np.shape(_UpperCAmelCase )
lowerCamelCase_ : List[Any] = self._expand(_UpperCAmelCase )
lowerCamelCase_ : Any = data_bp_input
lowerCamelCase_ : Union[str, Any] = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa
lowerCamelCase_ : Union[str, Any] = self.sig(_UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa
lowerCamelCase_ : str = self.sig(_UpperCAmelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCamelCase_ : Optional[Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowerCamelCase_ : str = np.multiply(
np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowerCamelCase_ : Any = np.dot(_UpperCAmelCase , self.vji )
lowerCamelCase_ : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCamelCase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowerCamelCase_ : int = self._calculate_gradient_from_pool(
_UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCamelCase_ : Tuple = self._expand_mat(pd_conva_all[k_conv] )
lowerCamelCase_ : Dict = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_ : Optional[int] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCamelCase_ : List[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCamelCase_ : Union[str, Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCamelCase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCamelCase_ : Dict = self.thre_bpa - pd_k_all * self.rate_thre
lowerCamelCase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the summed error of this single image and accumulate it
lowerCamelCase_ : Union[str, Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCamelCase_ : Optional[int] = rp + 1
lowerCamelCase_ : List[Any] = error_count / patterns
all_mse.append(_UpperCAmelCase )
def draw_error():
lowerCamelCase_ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_UpperCAmelCase , """+-""" )
plt.plot(_UpperCAmelCase , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(_UpperCAmelCase , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _lowerCAmelCase ( self : Any , __a : Any ) ->Optional[Any]:
# model predict
lowerCamelCase_ : int = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(_UpperCAmelCase )) )
for p in range(len(_UpperCAmelCase ) ):
lowerCamelCase_ : Any = np.asmatrix(datas_test[p] )
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase_ : Union[str, Any] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowerCamelCase_ : Optional[Any] = self._expand(_UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = data_bp_input
lowerCamelCase_ : Tuple = bp_outa * self.vji.T - self.thre_bpa
lowerCamelCase_ : Tuple = self.sig(_UpperCAmelCase )
lowerCamelCase_ : Dict = bp_outa * self.wkj.T - self.thre_bpa
lowerCamelCase_ : Union[str, Any] = self.sig(_UpperCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowerCamelCase_ : str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out]
return np.asarray(_UpperCAmelCase )
def _lowerCAmelCase ( self : Optional[Any] , __a : Optional[int] ) ->Union[str, Any]:
# return the image data after the convolution process so it can be inspected
lowerCamelCase_ : Any = np.asmatrix(_UpperCAmelCase )
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase_ : int = self.pooling(_UpperCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
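# A minimal, self-contained sketch (an assumed example, not part of the class above) of the
# "_expand" idea used by this CNN: flattening a list of 2-D pooled feature maps into the
# 1-D vector that feeds the fully connected BP layers.
import numpy as np

def expand_feature_maps(feature_maps):
    # concatenate every 2-D map, row-major, into one flat 1-D vector
    return np.concatenate([np.asarray(m).reshape(-1) for m in feature_maps])

maps = [np.ones((2, 2)), np.arange(4).reshape(2, 2)]
print(expand_feature_maps(maps))  # [1. 1. 1. 1. 0. 1. 2. 3.]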
| 278 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _snake_case (__lowercase=32 , __lowercase=10 , __lowercase=100 , __lowercase=1026 , __lowercase=True , __lowercase="data/tokenized_stories_train_wikitext103.jbl" , __lowercase="igf_context_pairs.jbl" , ):
set_seed(3)
# generate train_data and objective_set
UpperCamelCase_ , UpperCamelCase_ = generate_datasets(
__lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# load pretrained model
UpperCamelCase_ = load_gpta('gpt2').to(__lowercase)
print('computing perplexity on objective set')
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase).item()
print('perplexity on objective set:' , __lowercase)
# collect igf pairs and save to file demo.jbl
collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _snake_case (__lowercase , __lowercase=15 , __lowercase=128 , __lowercase=100 , __lowercase="igf_model.pt" , ):
set_seed(42)
# Load pre-trained model
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2')
# Initialize secondary learner to use embedding weights of model
UpperCamelCase_ = SecondaryLearner(__lowercase)
# Train secondary learner
UpperCamelCase_ = train_secondary_learner(
__lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=32 , __lowercase=1000 , __lowercase=16 , __lowercase=1.0 , __lowercase=recopy_gpta , __lowercase=None , __lowercase=10 , __lowercase="gpt2_finetuned.pt" , ):
UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
UpperCamelCase_ = RandomSampler(__lowercase)
UpperCamelCase_ = DataLoader(__lowercase , sampler=__lowercase)
UpperCamelCase_ = max_steps // (len(__lowercase)) + 1
UpperCamelCase_ = 0
UpperCamelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = recopy_model(__lowercase , __lowercase , __lowercase)
model.train()
if secondary_learner is not None:
secondary_learner.to(__lowercase)
secondary_learner.eval()
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = []
UpperCamelCase_ = []
# Compute the performance of the transformer model at the beginning
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase)
test_perps.append(__lowercase)
print('Test perplexity, step' , __lowercase , ':' , __lowercase)
for epoch in range(int(__lowercase)):
for step, example in enumerate(__lowercase):
torch.cuda.empty_cache()
UpperCamelCase_ = random.randint(0 , example.size(2) - context_len - 1)
UpperCamelCase_ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCamelCase_ = model(__lowercase , labels=__lowercase)
UpperCamelCase_ = True
if secondary_learner is not None:
UpperCamelCase_ = secondary_learner.forward(
torch.tensor(__lowercase , dtype=torch.long , device=__lowercase).unsqueeze(0))[0].item()
observed_qs.append(float(__lowercase))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCamelCase_ = -1
if predicted_q < threshold:
UpperCamelCase_ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
UpperCamelCase_ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCamelCase_ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase)
test_perps.append(__lowercase)
print('Test perplexity, step' , __lowercase , ':' , __lowercase)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __lowercase)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _snake_case ():
UpperCamelCase_ = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowercase , type=__lowercase , required=__lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=__lowercase , default=__lowercase , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=__lowercase , default=__lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=__lowercase , type=__lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=__lowercase , default=__lowercase , help='A seed for reproducible training.')
parser.add_argument(
'--context_len' , default=32 , type=__lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=__lowercase , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=__lowercase , help='secondary model evaluation is triggered at eval_freq')
parser.add_argument('--max_steps' , default=1000 , type=__lowercase , help='To calculate training epochs')
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=__lowercase , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=__lowercase , help='batch size of training data of language model(gpt2) ')
parser.add_argument(
'--eval_interval' , default=10 , type=__lowercase , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=__lowercase , help='The number of examples split to be used as objective_set/test_data')
parser.add_argument(
'--min_len' , default=1026 , type=__lowercase , help='The minimum length of the article to be used as objective set')
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=__lowercase , help='number of epochs to train secondary learner')
parser.add_argument('--trim' , default=__lowercase , type=__lowercase , help='truncate the example if it exceeds context length')
parser.add_argument(
'--threshold' , default=1.0 , type=__lowercase , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=__lowercase , help='finetuned_model_name')
parser.add_argument(
'--recopy_model' , default=__lowercase , type=__lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
UpperCamelCase_ = joblib.load('data/IGF_values.jbl')
# Train secondary learner
UpperCamelCase_ = training_secondary_learner(
__lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2')
set_seed(42)
# Generate train and test data to train and evaluate gpt2 model
UpperCamelCase_ , UpperCamelCase_ = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=__lowercase)
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
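# A hedged, standalone sketch of the batch-filtering rule used in the fine-tuning loop above:
# a context is kept only if the secondary learner's predicted information gain clears a
# threshold, and the threshold is relaxed after a warm-up number of batches. The names
# `filter_context` and `warmup_batches` are illustrative assumptions, not part of the script.
def filter_context(predicted_q, global_step, threshold=1.0, warmup_batches=10):
    if global_step >= warmup_batches:
        threshold = -1.0  # after warm-up the loop above drops the threshold to -1
    return predicted_q >= threshold

print(filter_context(0.5, global_step=3))   # False: below the strict early threshold
print(filter_context(0.5, global_step=20))  # True: threshold relaxed after warm-up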
| 23 | 0 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase_ : List[str] = get_logger(__name__)
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 ) -> Union[str, Any]:
'''simple docstring'''
os.makedirs(__lowercase ,exist_ok=__lowercase )
with FSDP.state_dict_type(
__lowercase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
_UpperCamelCase : Tuple = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_UpperCamelCase : Optional[int] = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
_UpperCamelCase : Union[str, Any] = os.path.join(__lowercase ,__lowercase )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(__lowercase ,__lowercase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_UpperCamelCase : int = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
_UpperCamelCase : Dict = os.path.join(__lowercase ,__lowercase )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(__lowercase ,__lowercase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_UpperCamelCase : Optional[int] = os.path.join(__lowercase ,f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(__lowercase ,exist_ok=__lowercase )
logger.info(f'''Saving model to {ckpt_dir}''' )
_UpperCamelCase : Tuple = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=__lowercase ,storage_writer=dist_cp.FileSystemWriter(__lowercase ) ,planner=DefaultSavePlanner() ,)
logger.info(f'''Model saved to {ckpt_dir}''' )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 ) -> Any:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__lowercase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__lowercase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
_UpperCamelCase : Any = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
_UpperCamelCase : Any = os.path.join(__lowercase ,__lowercase )
logger.info(f'''Loading model from {input_model_file}''' )
_UpperCamelCase : Optional[int] = torch.load(__lowercase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_UpperCamelCase : List[Any] = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
_UpperCamelCase : Union[str, Any] = os.path.join(__lowercase ,__lowercase )
logger.info(f'''Loading model from {input_model_file}''' )
_UpperCamelCase : Optional[Any] = torch.load(__lowercase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_UpperCamelCase : Tuple = (
os.path.join(__lowercase ,f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
_UpperCamelCase : List[str] = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=__lowercase ,storage_reader=dist_cp.FileSystemReader(__lowercase ) ,planner=DefaultLoadPlanner() ,)
_UpperCamelCase : int = state_dict["model"]
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(__lowercase )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 ) -> Any:
'''simple docstring'''
os.makedirs(__lowercase ,exist_ok=__lowercase )
with FSDP.state_dict_type(
__lowercase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
_UpperCamelCase : List[Any] = FSDP.optim_state_dict(__lowercase ,__lowercase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_UpperCamelCase : Any = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
_UpperCamelCase : Any = os.path.join(__lowercase ,__lowercase )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(__lowercase ,__lowercase )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
_UpperCamelCase : List[Any] = os.path.join(__lowercase ,f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(__lowercase ,exist_ok=__lowercase )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} ,storage_writer=dist_cp.FileSystemWriter(__lowercase ) ,planner=DefaultSavePlanner() ,)
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 ) -> Optional[Any]:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__lowercase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_UpperCamelCase : Tuple = None
# the check below should work, but it currently does not (mostly a PyTorch issue);
# in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_UpperCamelCase : Optional[Any] = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
_UpperCamelCase : Optional[Any] = os.path.join(__lowercase ,__lowercase )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
_UpperCamelCase : int = torch.load(__lowercase )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
_UpperCamelCase : Tuple = (
os.path.join(__lowercase ,f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
_UpperCamelCase : List[Any] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() ,optimizer_key="optimizer" ,storage_reader=dist_cp.FileSystemReader(__lowercase ) ,)
_UpperCamelCase : Optional[Any] = optim_state["optimizer"]
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
_UpperCamelCase : Union[str, Any] = FSDP.optim_state_dict_to_load(__lowercase ,__lowercase ,__lowercase )
optimizer.load_state_dict(__lowercase )
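# A hedged sketch of the core pattern the helpers above are built on: switching an FSDP
# module into FULL_STATE_DICT mode so that state_dict() gathers the full, unsharded weights,
# which rank 0 then writes to disk. Assumes a recent torch with FSDP support and an
# initialized process group; `model` is assumed to be an FSDP-wrapped module.
import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import FullStateDictConfig, StateDictType

def save_full_state_dict(model, path, rank):
    cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg):
        state_dict = model.state_dict()  # gathered on rank 0, empty on other ranks
    if rank == 0:
        torch.save(state_dict, path)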
| 435 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _a :
"""simple docstring"""
A_ = MBartConfig
A_ = {}
A_ = """gelu"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Union[str, Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase_ = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = TFMBartModel(config=_UpperCAmelCase ).get_decoder()
UpperCamelCase_ = inputs_dict['input_ids']
UpperCamelCase_ = input_ids[:1, :]
UpperCamelCase_ = inputs_dict['attention_mask'][:1, :]
UpperCamelCase_ = inputs_dict['head_mask']
UpperCamelCase_ = 1
# first forward pass
UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple()
UpperCamelCase_ = past_key_values[1]
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ):
if attention_mask is None:
UpperCamelCase_ = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id) , tf.inta)
if decoder_attention_mask is None:
UpperCamelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta),
] , axis=-1 , )
if head_mask is None:
UpperCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
A_ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
A_ = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ = True
A_ = False
A_ = False
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = TFMBartModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
A_ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
A_ = """facebook/mbart-large-en-ro"""
@cached_property
def _UpperCAmelCase ( self ) -> Any:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> int:
UpperCamelCase_ = self.translate_src_text(**_UpperCAmelCase )
self.assertListEqual(self.expected_text , _UpperCAmelCase )
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='tf' )
UpperCamelCase_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
UpperCamelCase_ = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
self._assert_generated_batch_equal_expected()
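# A small self-contained sketch of the padding-mask construction used by
# prepare_mbart_inputs_dict above: positions equal to pad_token_id get mask 0, all others 1.
# The pad_token_id value here is an assumption for illustration.
import tensorflow as tf

pad_token_id = 1
input_ids = tf.constant([[5, 7, 9, pad_token_id, pad_token_id]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 0 0]]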
| 23 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A_( A : Any , A : Optional[int]=() , A : int=None , A : Union[str, Any]="no" , A : Optional[Any]="29500"):
UpperCamelCase = False
UpperCamelCase = False
if any(key.startswith('KAGGLE') for key in os.environ.keys()):
UpperCamelCase = True
elif "IPython" in sys.modules:
UpperCamelCase = 'google.colab' in str(sys.modules['IPython'].get_ipython())
try:
UpperCamelCase = PrecisionType(mixed_precision.lower())
except ValueError:
raise ValueError(
f'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''')
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , __lowercase) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.')
if num_processes is None:
UpperCamelCase = 8
UpperCamelCase = PrepareForLaunch(__lowercase , distributed_type='TPU')
print(f'''Launching a training on {num_processes} TPU cores.''')
xmp.spawn(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork')
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.')
else:
print('Launching training on one CPU.')
function(*__lowercase)
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.')
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.')
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.')
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__lowercase , master_addr='127.0.0.1' , master_port=__lowercase , mixed_precision=__lowercase):
UpperCamelCase = PrepareForLaunch(__lowercase , distributed_type='MULTI_GPU')
print(f'''Launching training on {num_processes} GPUs.''')
try:
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork')
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.') from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase = '1'
print('Launching training on MPS.')
elif torch.cuda.is_available():
print('Launching training on one GPU.')
else:
print('Launching training on CPU.')
function(*__lowercase)
def A_( A : int , A : Union[str, Any]=() , A : List[str]=2):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__lowercase , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
UpperCamelCase = PrepareForLaunch(__lowercase , debug=__lowercase)
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork')
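# A hedged usage sketch for the launcher above. `notebook_launcher` is accelerate's public
# entry point for this code path; the training function must not initialize CUDA before the
# launch so the forked workers can set it up themselves. `training_loop` is an assumed example.
def training_loop(mixed_precision="no"):
    print(f"training with mixed_precision={mixed_precision}")

# In a notebook one would run (multi-GPU requires more than one visible device):
# from accelerate import notebook_launcher
# notebook_launcher(training_loop, args=("no",), num_processes=2)
training_loop("no")  # plain single-process call for illustration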
| 3 |
def _snake_case (__lowercase):
UpperCamelCase_ = 1
for i in range(1 , num + 1):
fact *= i
return fact
def _snake_case (__lowercase):
UpperCamelCase_ = 0
while number > 0:
UpperCamelCase_ = number % 10
sum_of_digits += last_digit
UpperCamelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _snake_case (__lowercase = 100):
UpperCamelCase_ = factorial(__lowercase)
UpperCamelCase_ = split_and_add(__lowercase)
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
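# An equivalent one-pass version of the two helpers above, using only the standard library:
# math.factorial computes 100! exactly, and summing the digits of its decimal string gives 648.
import math

def digit_sum_of_factorial(num: int = 100) -> int:
    return sum(int(digit) for digit in str(math.factorial(num)))

print(digit_sum_of_factorial(100))  # 648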
| 23 | 0 |
'''simple docstring'''
import torch
from torch import nn
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Any , a_ : Dict , a_ : List[str] , a_ : List[str] , a_ : Any , a_ : Dict=1 , a_ : List[str]=False ):
"""simple docstring"""
super().__init__()
__snake_case = n_token
__snake_case = d_embed
__snake_case = d_proj
__snake_case = cutoffs + [n_token]
__snake_case = [0] + self.cutoffs
__snake_case = div_val
__snake_case = self.cutoffs[0]
__snake_case = len(self.cutoffs ) - 1
__snake_case = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__snake_case = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
__snake_case = nn.Parameter(torch.zeros(self.n_clusters ) )
__snake_case = nn.ModuleList()
__snake_case = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_UpperCAmelCase , _UpperCAmelCase ) ) )
else:
self.out_projs.append(_UpperCAmelCase )
self.out_layers.append(nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
__snake_case , __snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__snake_case = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_UpperCAmelCase , _UpperCAmelCase ) ) )
self.out_layers.append(nn.Linear(_UpperCAmelCase , r_idx - l_idx ) )
__snake_case = keep_order
def A ( self : Tuple , a_ : str , a_ : Tuple , a_ : int , a_ : Union[str, Any] ):
"""simple docstring"""
if proj is None:
__snake_case = nn.functional.linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__snake_case = nn.functional.linear(_UpperCAmelCase , proj.t().contiguous() )
__snake_case = nn.functional.linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def A ( self : Dict , a_ : Dict , a_ : int=None , a_ : List[str]=False ):
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
__snake_case = hidden[..., :-1, :].contiguous()
__snake_case = labels[..., 1:].contiguous()
__snake_case = hidden.view(-1 , hidden.size(-1 ) )
__snake_case = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
__snake_case = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__snake_case = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__snake_case = labels != -100
__snake_case = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
__snake_case = (
-nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__snake_case = nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
__snake_case , __snake_case = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__snake_case , __snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__snake_case = self.out_layers[0].weight[l_idx:r_idx]
__snake_case = self.out_layers[0].bias[l_idx:r_idx]
else:
__snake_case = self.out_layers[i].weight
__snake_case = self.out_layers[i].bias
if i == 0:
__snake_case = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__snake_case = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_UpperCAmelCase )
biases.append(_UpperCAmelCase )
__snake_case , __snake_case , __snake_case = weights[0], biases[0], self.out_projs[0]
__snake_case = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
if labels is None:
__snake_case = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__snake_case = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
__snake_case = 0
__snake_case = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase ) - 1 ):
__snake_case , __snake_case = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__snake_case = (labels >= l_idx) & (labels < r_idx)
__snake_case = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__snake_case = labels.index_select(0 , _UpperCAmelCase ) - l_idx
__snake_case = head_logprob.index_select(0 , _UpperCAmelCase )
__snake_case = hidden.index_select(0 , _UpperCAmelCase )
else:
__snake_case = hidden
if i == 0:
if labels is not None:
__snake_case = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__snake_case = head_logprob[:, : self.cutoffs[0]]
else:
__snake_case , __snake_case , __snake_case = weights[i], biases[i], self.out_projs[i]
__snake_case = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
__snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__snake_case = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__snake_case = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__snake_case = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , _UpperCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def A ( self : List[str] , a_ : Optional[Any] ):
"""simple docstring"""
if self.n_clusters == 0:
__snake_case = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
__snake_case , __snake_case = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__snake_case , __snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__snake_case = self.out_layers[0].weight[l_idx:r_idx]
__snake_case = self.out_layers[0].bias[l_idx:r_idx]
else:
__snake_case = self.out_layers[i].weight
__snake_case = self.out_layers[i].bias
if i == 0:
__snake_case = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__snake_case = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_UpperCAmelCase )
biases.append(_UpperCAmelCase )
__snake_case , __snake_case , __snake_case = weights[0], biases[0], self.out_projs[0]
__snake_case = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__snake_case = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
__snake_case = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase ) - 1 ):
__snake_case , __snake_case = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__snake_case = head_logprob[:, : self.cutoffs[0]]
else:
__snake_case , __snake_case , __snake_case = weights[i], biases[i], self.out_projs[i]
__snake_case = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
__snake_case = head_logprob[:, -i] + tail_logprob_i
__snake_case = logprob_i
return out
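# A toy sketch of the adaptive-softmax factorization implemented above: for a tail word w in
# cluster c, log p(w | h) = log p(c | h) + log p(w | c, h). The probabilities below are
# illustrative assumptions, not model outputs.
import torch

head_logprob = torch.log(torch.tensor([0.6, 0.3, 0.1]))  # shortlist word, cluster 1, cluster 2
tail_logprob = torch.log(torch.tensor([0.7, 0.3]))       # words inside cluster 1
log_p_word = head_logprob[1] + tail_logprob[0]           # p = 0.3 * 0.7
print(torch.exp(log_p_word))  # tensor(0.2100)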
| 69 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case__ : str = logging.get_logger(__name__)
def _snake_case (__lowercase):
if isinstance(__lowercase , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
return videos
elif isinstance(__lowercase , (list, tuple)) and is_valid_image(videos[0]):
return [videos]
elif is_valid_image(__lowercase):
return [[videos]]
raise ValueError(f"""Could not make batched video from {videos}""")
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""pixel_values"""]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None:
super().__init__(**_UpperCAmelCase )
UpperCamelCase_ = size if size is not None else {'shortest_edge': 224}
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' )
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = resample
UpperCamelCase_ = do_rescale
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" in size:
UpperCamelCase_ = get_resize_output_image_size(_UpperCAmelCase , size['shortest_edge'] , default_to_square=_UpperCAmelCase )
elif "height" in size and "width" in size:
UpperCamelCase_ = (size['height'], size['width'])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
UpperCamelCase_ = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> int:
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_ = to_numpy_array(_UpperCAmelCase )
if do_resize:
UpperCamelCase_ = self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase )
if do_center_crop:
UpperCamelCase_ = self.center_crop(_UpperCAmelCase , size=_UpperCAmelCase )
if do_rescale:
UpperCamelCase_ = self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase )
if do_normalize:
UpperCamelCase_ = self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase )
UpperCamelCase_ = to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase )
return image
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ) -> PIL.Image.Image:
UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_ = resample if resample is not None else self.resample
UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_ = image_std if image_std is not None else self.image_std
UpperCamelCase_ = size if size is not None else self.size
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCamelCase_ = make_batched(_UpperCAmelCase )
UpperCamelCase_ = [
[
self._preprocess_image(
image=_UpperCAmelCase , do_resize=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , do_center_crop=_UpperCAmelCase , crop_size=_UpperCAmelCase , do_rescale=_UpperCAmelCase , rescale_factor=_UpperCAmelCase , do_normalize=_UpperCAmelCase , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase , data_format=_UpperCAmelCase , )
for img in video
]
for video in videos
]
UpperCamelCase_ = {'pixel_values': videos}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
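# A simplified re-implementation of the make_batched dispatch defined near the top of this
# file, restricted to numpy arrays (the real helper also accepts PIL/torch/tf images). It
# normalizes every input to a batch of videos so preprocessing can always iterate
# "for video in videos: for img in video".
import numpy as np

def make_batched_sketch(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos    # already a batch of videos
    if isinstance(videos, (list, tuple)):
        return [videos]  # a single video (list of frames)
    return [[videos]]    # a single image

frame = np.zeros((4, 4, 3), dtype=np.uint8)
print(len(make_batched_sketch(frame)))              # 1 video containing 1 frame
print(len(make_batched_sketch([frame, frame])[0]))  # 2 frames in the single video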
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Dict = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
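# A minimal sketch of the lazy-import pattern that _LazyModule implements: a module-level
# __getattr__ (PEP 562) defers the heavy submodule import until an attribute is first
# accessed. This is a simplified stand-in meant to live in a package __init__.py, not
# transformers' actual implementation; the mapping below uses assumed names.
import importlib

_IMPORT_MAP = {"SEWConfig": ".configuration_sew"}  # attribute -> submodule

def __getattr__(name):
    if name in _IMPORT_MAP:
        module = importlib.import_module(_IMPORT_MAP[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")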
| 171 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = 42
A_ = 42
class _a ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
A_ = 1
@register_to_config
def __init__( self , _UpperCAmelCase = 2000 , _UpperCAmelCase = 0.1_5 , _UpperCAmelCase = 0.0_1 , _UpperCAmelCase = 1_3_4_8.0 , _UpperCAmelCase = 1e-5 , _UpperCAmelCase = 1 , ) -> Tuple:
# standard deviation of the initial noise distribution
UpperCamelCase_ = sigma_max
# setable values
UpperCamelCase_ = None
self.set_sigmas(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> torch.FloatTensor:
return sample
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> str:
UpperCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCamelCase_ = torch.linspace(1 , _UpperCAmelCase , _UpperCAmelCase , device=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> Any:
UpperCamelCase_ = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCamelCase_ = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCamelCase_ = torch.exp(torch.linspace(math.log(_UpperCAmelCase ) , math.log(_UpperCAmelCase ) , _UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
UpperCamelCase_ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCamelCase_ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be on the same device as the tensor they index, so move the timesteps to the sigmas' device
UpperCamelCase_ = timesteps.to(self.discrete_sigmas.device )
UpperCamelCase_ = self.discrete_sigmas[timesteps].to(sample.device )
UpperCamelCase_ = self.get_adjacent_sigma(_UpperCAmelCase , _UpperCAmelCase ).to(sample.device )
UpperCamelCase_ = torch.zeros_like(_UpperCAmelCase )
UpperCamelCase_ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCamelCase_ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
UpperCamelCase_ = diffusion.unsqueeze(-1 )
UpperCamelCase_ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCamelCase_ = randn_tensor(
sample.shape , layout=sample.layout , generator=_UpperCAmelCase , device=sample.device , dtype=sample.dtype )
UpperCamelCase_ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCamelCase_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_UpperCAmelCase , prev_sample_mean=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCamelCase_ = randn_tensor(sample.shape , layout=sample.layout , generator=_UpperCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
UpperCamelCase_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCamelCase_ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCamelCase_ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
UpperCamelCase_ = step_size.unsqueeze(-1 )
UpperCamelCase_ = sample + step_size * model_output
UpperCamelCase_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCamelCase_ = timesteps.to(original_samples.device )
UpperCamelCase_ = self.discrete_sigmas.to(original_samples.device )[timesteps]
UpperCamelCase_ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_UpperCAmelCase ) * sigmas[:, None, None, None]
)
UpperCamelCase_ = noise + original_samples
return noisy_samples
def __len__( self ) -> Optional[int]:
return self.config.num_train_timesteps
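# A standalone sketch of the geometric noise schedule that set_sigmas builds above:
# sigma(t) = sigma_min * (sigma_max / sigma_min) ** t, which matches
# exp(linspace(log(sigma_min), log(sigma_max), N)) up to ordering. sigma_min and sigma_max
# mirror the scheduler's defaults; num_steps is shortened here for display.
import torch

sigma_min, sigma_max, num_steps = 0.01, 1348.0, 5
timesteps = torch.linspace(1, 1e-5, num_steps)  # t runs from 1 down to ~0
sigmas = sigma_min * (sigma_max / sigma_min) ** timesteps
print(sigmas)  # decreasing from ~1348 down to ~0.01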
| 23 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( __A : Union[str, Any] , __A : Optional[Any] , __A : List[str] ) -> Dict:
'''simple docstring'''
snake_case : str = MobileBertConfig.from_json_file(__lowercase )
print(f"""Building PyTorch model from configuration: {config}""" )
snake_case : Any = MobileBertForPreTraining(__lowercase )
# Load weights from tf checkpoint
snake_case : Optional[int] = load_tf_weights_in_mobilebert(__lowercase , __lowercase , __lowercase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 36 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Optional[int] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 23 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_megatron_bert"""] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 331 |
import datasets
from .evaluate import evaluate
snake_case__ : int = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
snake_case__ : Union[str, Any] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
snake_case__ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
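    # _compute re-wraps the flat reference list into the nested SQuAD-style layout
    # (dataset -> paragraphs -> qas) that the official CUAD evaluate() script expects.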
    def _compute( self , predictions , references ):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 23 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
lowerCAmelCase_ = {"""bert_for_seq_generation""": 5_1_2}
class BertGenerationTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sep_token="<::::>" , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
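    # SentencePiece processors are not picklable: __getstate__ below drops the processor
    # before pickling and __setstate__ re-creates it from the vocab file.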
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 326 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
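# Two minimal Apache Beam-based dataset builders exercised by the tests below: one emits
# flat string examples and one emits nested features.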
class DummyBeamDataset( datasets.BeamBasedBuilder ):
"""simple docstring"""
    def _info(self ):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=None , )
    def _split_generators(self , dl_manager , pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]
    def _build_pcollection(self , pipeline , examples ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset( datasets.BeamBasedBuilder ):
"""simple docstring"""
    def _info(self ):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=None , )
    def _split_generators(self , dl_manager , pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
        ]
    def _build_pcollection(self , pipeline , examples ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class BeamBuilderTest( TestCase ):
"""simple docstring"""
@require_beam
    def test_download_and_prepare(self ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , expected_num_examples )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , expected_num_examples )
            self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset
@require_beam
    def test_download_and_prepare_sharded(self ):
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
            with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , expected_num_examples )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset
@require_beam
    def test_no_beam_options(self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
    def test_nested_features(self ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , expected_num_examples )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , expected_num_examples )
            self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset
| 23 | 0 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))


def sigmoid_linear_unit(vector: np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector )
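# Quick sanity check (values follow directly from the definitions above):
#   sigmoid(np.array([0.0]))             -> array([0.5])
#   sigmoid_linear_unit(np.array([0.0])) -> array([0.])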
if __name__ == "__main__":
import doctest
doctest.testmod()
| 169 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 23 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'xlm-roberta'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 597 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2SeqTrainerTester( TestCasePlus ):
    """simple docstring"""
@slow
@require_torch
    def test_finetune_bert2bert(self ):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
        val_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'] , padding='max_length' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['highlights'] , padding='max_length' , truncation=True , max_length=128 )
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        train_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        val_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='steps' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 23 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name ):
    """simple docstring"""
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants" )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
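# For example, get_swin_config("swin-base-simmim-window6-192") yields window_size=6 and
# embed_dim=128, matching the SimMIM checkpoint names accepted by the argparser below.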
def rename_key(name ):
    """simple docstring"""
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token" , "embeddings.mask_token" )
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
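# Note: the qkv-splitting helper below writes separate query/key/value entries back into
# the state dict. The exact target key strings were lost from this snippet; they are
# reconstructed here to match the standard HF Swin parameter layout (an assumption,
# flagged again in the code comments).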
def convert_state_dict(orig_state_dict , model ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # Split the fused qkv projection into separate query/key/value entries.
            # The target key names below follow the usual HF Swin layout; they were
            # dropped from this snippet and are reconstructed here.
            prefix = f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"""
            if "weight" in key:
                orig_state_dict[f"""{prefix}.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""{prefix}.key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[f"""{prefix}.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""{prefix}.query.bias"""] = val[:dim]
                orig_state_dict[f"""{prefix}.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""{prefix}.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and image processor for {model_name} to hub""" )
        model.push_to_hub(f"""microsoft/{model_name}""" )
        image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 197 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator , batch_size = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue' , 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
    return train_dataloader, eval_dataloader
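# Usage sketch (as done inside training_function below):
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)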
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue' , 'mrpc')
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
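            # Note (per the Accelerate docs): inside `accumulate`, gradient synchronization is
            # skipped on intermediate micro-batches, so the optimizer/scheduler steps below
            # only take effect once every `gradient_accumulation_steps` batches.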
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args)
if __name__ == "__main__":
main()
| 23 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Any = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class IBertConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "ibert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 278 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    """simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , ) -> List[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = is_training
UpperCamelCase_ = use_auxiliary_loss
UpperCamelCase_ = num_queries
UpperCamelCase_ = num_channels
UpperCamelCase_ = min_size
UpperCamelCase_ = max_size
UpperCamelCase_ = num_labels
UpperCamelCase_ = hidden_dim
UpperCamelCase_ = hidden_dim
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase )
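        # Random binary masks and labels stand in for real annotations: thresholding at 0.5
        # makes roughly half of the mask pixels / class labels positive, which is enough to
        # exercise the segmentation loss in the tests.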
UpperCamelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5
).float()
UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long()
UpperCamelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCamelCase_ = self.num_queries
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = [1, 1, 1, 1]
UpperCamelCase_ = self.num_channels
UpperCamelCase_ = 64
UpperCamelCase_ = 128
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
return config
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = output.encoder_hidden_states
UpperCamelCase_ = output.pixel_decoder_hidden_states
UpperCamelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any:
with torch.no_grad():
UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
UpperCamelCase_ = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = MaskaFormerModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> str:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = (self.model_tester.min_size,) * 2
UpperCamelCase_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCamelCase_ = self.model_tester.get_config()
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _UpperCAmelCase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCamelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _UpperCAmelCase ( self ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# masks_queries_logits
UpperCamelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCamelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase )
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 23 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
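# This init exposes the Kandinsky text-to-image, image-to-image, inpainting and prior
# pipelines plus the multilingual CLIP text encoder; when torch/transformers are missing,
# the dummy objects imported in the except-branch above stand in for the real pipelines.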
| 435 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
snake_case__ : List[str] = logging.get_logger(__name__)
class VisionEncoderDecoderConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """vision-encoder-decoder"""
    is_composition = True
    def __init__( self , **kwargs ) -> None:
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuration of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ) -> PretrainedConfig:
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs
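    # The dummy-input generator below reuses tokenizer-generated ids as decoder inputs and
    # pairs them with zeroed encoder hidden states of shape
    # (batch, encoder_sequence, encoder_hidden_size), so the decoder can be exported to
    # ONNX without running the vision encoder.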
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['decoder_input_ids'] = dummy_input.pop('input_ids' )
        common_inputs['decoder_attention_mask'] = dummy_input.pop('attention_mask' )
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> None:
        pass
    def get_encoder_config( self , encoder_config ) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = "default" ) -> OnnxConfig:
UpperCamelCase_ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_UpperCAmelCase , _UpperCAmelCase )
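
# Illustrative composition sketch (not part of the original module): with the
# de-obfuscated transformers names above, an image-to-text config pairs any
# vision encoder with any text decoder. The encoder/decoder choices below are
# placeholders, not prescribed by this file.
#
#   from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#
#   encoder_config = ViTConfig()
#   decoder_config = BertConfig()
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#       encoder_config, decoder_config
#   )
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention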
| 23 | 0 |
'''simple docstring'''
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Return the maximum value that fits in a knapsack of capacity ``max_weight``.

    >>> knapsack([1, 2, 3], [6, 10, 12], 3, 5, 0)
    22
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
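    # Hypothetical sanity check (values are illustrative, not from the original
    # file): with weights [3, 2, 4, 4], values [3, 2, 4, 6] and capacity 6, the
    # best choice is the items of weight 2 and 4 (value 2 + 6 = 8).
    assert knapsack([3, 2, 4, 4], [3, 2, 4, 6], 4, 6, 0) == 8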
| 3 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
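# Example invocation (the script name and all paths are illustrative, not part
# of the original file):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin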
| 23 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
a : str = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = """summarization"""
    loss_names = ["""loss"""]
    metric_names = ROUGE_KEYS
    default_val_metric = """rouge2"""
    def __init__(self, hparams, **kwargs):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
__snake_case = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
__snake_case = Path(self.output_dir ) / "metrics.json"
__snake_case = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
__snake_case = 0
__snake_case = defaultdict(_UpperCAmelCase )
__snake_case = self.config.model_type
__snake_case = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
__snake_case = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__snake_case = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
__snake_case = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__snake_case = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__snake_case = get_git_info()["repo_sha"]
__snake_case = hparams.num_workers
__snake_case = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _UpperCAmelCase ):
__snake_case = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__snake_case = self.decoder_start_token_id
__snake_case = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
__snake_case = False
__snake_case = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__snake_case = self.hparams.eval_max_gen_length
else:
__snake_case = self.model.config.max_length
__snake_case = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch) -> Dict[str, List[str]]:
"""simple docstring"""
__snake_case = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(_UpperCAmelCase , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
__snake_case = True
return readable_batch
    def forward(self, input_ids, **kwargs):
        """simple docstring"""
        return self.model(input_ids, **kwargs)
    def ids_to_clean_text(self, generated_ids):
        """simple docstring"""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        return lmap(str.strip, gen_text)
    def _step(self, batch):
"""simple docstring"""
__snake_case = self.tokenizer.pad_token_id
__snake_case , __snake_case = batch["input_ids"], batch["attention_mask"]
__snake_case = batch["labels"]
if isinstance(self.model , _UpperCAmelCase ):
__snake_case = self.model._shift_right(_UpperCAmelCase )
else:
__snake_case = shift_tokens_right(_UpperCAmelCase , _UpperCAmelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__snake_case = decoder_input_ids
self.save_readable_batch(_UpperCAmelCase )
__snake_case = self(_UpperCAmelCase , attention_mask=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__snake_case = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__snake_case = nn.CrossEntropyLoss(ignore_index=_UpperCAmelCase )
assert lm_logits.shape[-1] == self.vocab_size
__snake_case = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__snake_case = nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
__snake_case , __snake_case = label_smoothed_nll_loss(
_UpperCAmelCase , _UpperCAmelCase , self.hparams.label_smoothing , ignore_index=_UpperCAmelCase )
return (loss,)
@property
    def pad(self) -> int:
"""simple docstring"""
return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx):
"""simple docstring"""
__snake_case = self._step(_UpperCAmelCase )
__snake_case = dict(zip(self.loss_names , _UpperCAmelCase ) )
# tokens per batch
__snake_case = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
__snake_case = batch["input_ids"].shape[0]
__snake_case = batch["input_ids"].eq(self.pad ).sum()
__snake_case = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx):
        """simple docstring"""
        return self._generative_step(batch)
def A ( self : int , a_ : Tuple , a_ : List[Any]="val" ):
"""simple docstring"""
self.step_count += 1
__snake_case = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__snake_case = losses["loss"]
__snake_case = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
__snake_case = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__snake_case = torch.tensor(_UpperCAmelCase ).type_as(_UpperCAmelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_UpperCAmelCase )
__snake_case = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
__snake_case = self.step_count
self.metrics[prefix].append(_UpperCAmelCase ) # callback writes this to self.metrics_save_path
__snake_case = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        """simple docstring"""
        return calculate_rouge(preds, target)
    def _generative_step(self, batch):
"""simple docstring"""
__snake_case = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__snake_case = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=_UpperCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__snake_case = (time.time() - ta) / batch["input_ids"].shape[0]
__snake_case = self.ids_to_clean_text(_UpperCAmelCase )
__snake_case = self.ids_to_clean_text(batch["labels"] )
__snake_case = self._step(_UpperCAmelCase )
__snake_case = dict(zip(self.loss_names , _UpperCAmelCase ) )
__snake_case = self.calc_generative_metrics(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.mean(lmap(_UpperCAmelCase , _UpperCAmelCase ) )
base_metrics.update(gen_time=_UpperCAmelCase , gen_len=_UpperCAmelCase , preds=_UpperCAmelCase , target=_UpperCAmelCase , **_UpperCAmelCase )
return base_metrics
    def test_step(self, batch, batch_idx):
        """simple docstring"""
        return self._generative_step(batch)
    def test_epoch_end(self, outputs):
        """simple docstring"""
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        """simple docstring"""
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, )
        return dataset
    def get_dataloader(self, type_path, batch_size, shuffle=False) -> DataLoader:
        """simple docstring"""
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, )
    def train_dataloader(self) -> DataLoader:
        """simple docstring"""
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader
    def val_dataloader(self) -> DataLoader:
        """simple docstring"""
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)
    def test_dataloader(self) -> DataLoader:
        """simple docstring"""
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
parser.add_argument(
"--max_source_length" , default=1_024 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=56 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=142 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=142 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=_UpperCAmelCase )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=_UpperCAmelCase )
parser.add_argument("--max_tokens_per_batch" , type=_UpperCAmelCase , default=_UpperCAmelCase )
parser.add_argument("--logger_name" , type=_UpperCAmelCase , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=_UpperCAmelCase , default=500 , required=_UpperCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=_UpperCAmelCase , default="summarization" , required=_UpperCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=_UpperCAmelCase , default=0.0 , required=_UpperCAmelCase )
parser.add_argument("--src_lang" , type=_UpperCAmelCase , default="" , required=_UpperCAmelCase )
parser.add_argument("--tgt_lang" , type=_UpperCAmelCase , default="" , required=_UpperCAmelCase )
parser.add_argument("--eval_beams" , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase )
parser.add_argument(
"--val_metric" , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=_UpperCAmelCase , default=1 , required=_UpperCAmelCase , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class TranslationModule(SummarizationModule):
    mode = """translation"""
    loss_names = ["""loss"""]
    metric_names = ["""bleu"""]
    default_val_metric = """bleu"""
    def __init__(self, hparams, **kwargs):
        """simple docstring"""
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang
    def calc_generative_metrics(self, preds, target) -> dict:
        """simple docstring"""
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
__snake_case = SummarizationModule(__lowercase )
else:
__snake_case = TranslationModule(__lowercase )
__snake_case = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
__snake_case = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__snake_case = os.environ.get("WANDB_PROJECT" , __lowercase )
__snake_case = WandbLogger(name=model.output_dir.name , project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__snake_case = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
__snake_case = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__snake_case = False
__snake_case = args.val_metric == "loss"
__snake_case = generic_train(
__lowercase , __lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __lowercase ) , early_stopping_callback=__lowercase , logger=__lowercase , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
__snake_case = ""
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=__lowercase ) )
if checkpoints:
__snake_case = checkpoints[-1]
__snake_case = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
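    # Example invocation (flag values are illustrative only; the generic
    # training flags come from lightning_base / BaseTransformer):
    #   python finetune.py --data_dir ./cnn_dm --output_dir ./cnn_dm_out \
    #       --model_name_or_path facebook/bart-base --task summarization \
    #       --do_train --n_val 500 --val_metric rouge2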
| 69 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = MODEL_FOR_MASKED_LM_MAPPING
A_ = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self) -> None:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
] , )
UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
UpperCamelCase_ = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor gets cast back to float32
# for postprocessing.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(_UpperCAmelCase )
@slow
@require_tf
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(_UpperCAmelCase )
    def run_large_test(self, unmasker) -> None:
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
@require_tf
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"""This is another {tokenizer.mask_token} test""",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
with self.assertRaises(_UpperCAmelCase ):
fill_masker([None] )
        # Inputs without a mask_token are not supported
with self.assertRaises(_UpperCAmelCase ):
fill_masker('This is' )
self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase )
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Call argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Score equivalence
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs]
UpperCamelCase_ = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
    def run_test_top_k(self, model, tokenizer):
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
# top_k=2, ntargets=3
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # candidates than the number of unique targets
self.assertEqual(len(_UpperCAmelCase ) , 3 )
    def fill_mask_with_multiple_masks(self, model, tokenizer):
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
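
# Minimal usage sketch of the pipeline exercised above (the model name and the
# output values are illustrative, not taken from the assertions in the tests):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#   unmasker(f"Paris is the {unmasker.tokenizer.mask_token} of France.")
#   # -> [{"sequence": ..., "score": ..., "token": ..., "token_str": ...}, ...]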
| 23 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
A__ : List[str] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
_a = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_a = field(
default=UpperCAmelCase__ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_a = field(
default=UpperCAmelCase__ ,metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_a = field(
default=UpperCAmelCase__ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} ,)
_a = field(
default=UpperCAmelCase__ ,metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} ,)
_a = field(
default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,)
_a = field(
default=UpperCAmelCase__ ,metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} ,)
@dataclass
class DataTrainingArguments:
_a = field(default=UpperCAmelCase__ ,metadata={'''help''': '''The input training data file (a text file).'''} )
_a = field(
default=UpperCAmelCase__ ,metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} ,)
_a = field(
default=UpperCAmelCase__ ,metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_a = field(
default=UpperCAmelCase__ ,metadata={'''help''': '''The number of processes to use for the preprocessing.'''} ,)
_a = field(
default=UpperCAmelCase__ ,metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} ,)
_a = field(
default=UpperCAmelCase__ ,metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} ,)
_a = field(
default=UpperCAmelCase__ ,metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} ,)
_a = field(
default=UpperCAmelCase__ ,metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} ,)
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('''.''')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['''input_ids'''])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1) for k, v in batch.items()}
        # Add back labels
        batch['''labels'''] = torch.tensor(labels , dtype=torch.int64)
        return batch
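
# Shape note on DataCollatorForMultipleChoice above (illustrative numbers, not
# from the original file): with batch_size=2 and num_choices=4 the tokenizer
# pads 8 flattened sequences, and v.view(batch_size, num_choices, -1) restores
# tensors of shape (2, 4, seq_len) before labels are re-attached as batch["labels"].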
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' ,__lowercase ,__lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(__lowercase )
datasets.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase_ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase_ : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCAmelCase_ : Optional[int] = {}
if data_args.train_file is not None:
lowerCAmelCase_ : Tuple = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase_ : Dict = data_args.validation_file
lowerCAmelCase_ : Any = data_args.train_file.split('''.''' )[-1]
lowerCAmelCase_ : Optional[int] = load_dataset(
__lowercase ,data_files=__lowercase ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
# Downloading and loading the swag dataset from the hub.
lowerCAmelCase_ : Optional[int] = load_dataset(
'''swag''' ,'''regular''' ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase_ : List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=__lowercase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCAmelCase_ : List[str] = [f"""ending{i}""" for i in range(4 )]
lowerCAmelCase_ : Optional[Any] = '''sent1'''
lowerCAmelCase_ : List[Any] = '''sent2'''
if data_args.max_seq_length is None:
lowerCAmelCase_ : Tuple = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCAmelCase_ : int = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCAmelCase_ : int = min(data_args.max_seq_length ,tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCamelCase : List[str] ):
lowerCAmelCase_ : List[str] = [[context] * 4 for context in examples[context_name]]
lowerCAmelCase_ : Optional[int] = examples[question_header_name]
lowerCAmelCase_ : Union[str, Any] = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__lowercase )
]
# Flatten out
lowerCAmelCase_ : int = list(chain(*__lowercase ) )
lowerCAmelCase_ : Any = list(chain(*__lowercase ) )
# Tokenize
lowerCAmelCase_ : str = tokenizer(
__lowercase ,__lowercase ,truncation=__lowercase ,max_length=__lowercase ,padding='''max_length''' if data_args.pad_to_max_length else False ,)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 ,len(__lowercase ) ,4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCAmelCase_ : Tuple = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCAmelCase_ : Dict = min(len(__lowercase ) ,data_args.max_train_samples )
lowerCAmelCase_ : Dict = train_dataset.select(range(__lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCAmelCase_ : List[Any] = train_dataset.map(
__lowercase ,batched=__lowercase ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCAmelCase_ : Dict = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCAmelCase_ : int = min(len(__lowercase ) ,data_args.max_eval_samples )
lowerCAmelCase_ : List[Any] = eval_dataset.select(range(__lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCAmelCase_ : str = eval_dataset.map(
__lowercase ,batched=__lowercase ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
# Data collator
lowerCAmelCase_ : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__lowercase ,pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCamelCase : Union[str, Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Dict = eval_predictions
lowerCAmelCase_ : Optional[int] = np.argmax(__lowercase ,axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCAmelCase_ : Optional[Any] = Trainer(
model=__lowercase ,args=__lowercase ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,tokenizer=__lowercase ,data_collator=__lowercase ,compute_metrics=__lowercase ,)
# Training
if training_args.do_train:
lowerCAmelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase_ : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase_ : Dict = last_checkpoint
lowerCAmelCase_ : List[str] = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase_ : List[Any] = train_result.metrics
lowerCAmelCase_ : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowercase )
)
lowerCAmelCase_ : List[Any] = min(__lowercase ,len(__lowercase ) )
trainer.log_metrics('''train''' ,__lowercase )
trainer.save_metrics('''train''' ,__lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase_ : List[Any] = trainer.evaluate()
lowerCAmelCase_ : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowercase )
lowerCAmelCase_ : Optional[int] = min(__lowercase ,len(__lowercase ) )
trainer.log_metrics('''eval''' ,__lowercase )
trainer.save_metrics('''eval''' ,__lowercase )
lowerCAmelCase_ : Optional[int] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
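    # Example invocation (hyper-parameter values are illustrative only):
    #   python run_swag.py --model_name_or_path bert-base-uncased \
    #       --do_train --do_eval --output_dir ./swag_out \
    #       --per_device_train_batch_size 16 --learning_rate 5e-5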
| 171 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionSAGPipeline
A_ = TEXT_TO_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_BATCH_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
A_ = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase )
UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> List[Any]:
if str(_UpperCAmelCase ).startswith('mps' ):
UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCamelCase_ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
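

# Added usage sketch, not part of the test file: roughly how the SAG pipeline
# exercised above is driven outside of unittest; `sag_scale` sets the strength
# of self-attention guidance. Guarded so it only runs where a GPU and the
# downloaded weights are actually available.
if __name__ == "__main__" and torch.cuda.is_available():
    demo_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    demo_pipe = demo_pipe.to("cuda")
    demo_image = demo_pipe("a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5).images[0]
    demo_image.save("sag_demo.png")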
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by weight, backed by a binary heap."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected, weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
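

# Added usage sketch, not part of the original module: builds a small triangle
# graph with the classes above and runs prims_algo on it. The MST keeps the
# two cheapest edges, a-b (3) and a-c (5), so node b's parent is a.
if __name__ == "__main__":
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, parent = prims_algo(g)
    print(dist, parent)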
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache built on a deque plus a membership set."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        # Print the cache contents in MRU-to-LRU order
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
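
    # Added note, not in the original file: for memoizing pure functions the
    # standard library's functools.lru_cache provides the same eviction policy
    # without a hand-rolled cache; shown here as a minimal sketch.
    from functools import lru_cache as stdlib_lru_cache

    @stdlib_lru_cache(maxsize=4)
    def square(x: int) -> int:
        return x * x

    assert square(3) == 9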
lowerCAmelCase_ = """Input must be a string of 8 numbers plus letter"""
lowerCAmelCase_ = """TRWAGMYFPDXBNJZSQVHLCKE"""
def A_ ( lowercase_ ) -> Optional[Any]:
if not isinstance(__lowercase , __lowercase ):
_snake_case : Dict = f'''Expected string as input, found {type(__lowercase ).__name__}'''
raise TypeError(__lowercase )
_snake_case : Optional[Any] = spanish_id.replace('''-''' , '''''' ).upper()
if len(__lowercase ) != 9:
raise ValueError(__lowercase )
try:
_snake_case : str = int(spanish_id_clean[0:8] )
_snake_case : str = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__lowercase ) from ex
if letter.isdigit():
raise ValueError(__lowercase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
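
    # Added usage sketch, not in the original file: the check letter is the
    # character of LOOKUP_LETTERS indexed by the 8-digit number modulo 23;
    # 12345678 % 23 == 14, so the expected letter is 'Z'.
    assert is_spain_national_id("12345678Z")
    assert is_spain_national_id("12345678-Z")  # the hyphen is stripped
    assert not is_spain_national_id("12345678T")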
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # swish / SiLU: x * sigmoid(x)
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
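
    # Added demo, not in the original file: SiLU evaluated at a few points;
    # sigmoid(0) = 0.5, so sigmoid_linear_unit(0) is exactly 0.
    assert sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0]))[1] == 0.0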
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
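
    # Added sanity check, not in the original file: the first ten ugly numbers
    # are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th is 12.
    assert ugly_numbers(10) == 12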
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year with Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
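
    # Added worked check, not in the original file: for 2023 the algorithm
    # gives days_to_add = 15 and days_from_phm_to_sunday = 3, so Easter falls
    # 18 days after March 22, i.e. April 9, 2023.
    assert gauss_easter(2023) == datetime(2023, 4, 9)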
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
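

# Added usage sketch, not in the original file: (0,0,0), (1,1,1) and (3,3,3)
# lie on one line, so the AB x AC cross product is the zero vector; the second
# triple spans a plane instead.
assert are_collinear((0, 0, 0), (1, 1, 1), (3, 3, 3))
assert not are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))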
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
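

# Added usage sketch, not part of the test file: minimal non-test invocation of
# the image-variation pipeline exercised above; the stabilityai checkpoint id
# is given as an example. Guarded so it only runs with a GPU and weights.
if __name__ == "__main__" and torch.cuda.is_available():
    demo_pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16
    ).to("cuda")
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
    )
    demo_images = demo_pipe(init_image, "a photo of a turtle").images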
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath):
        # Find instances where a non-binary file is opened without UTF-8 encoding
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath):
        # Find instances of print statements outside comments and docstrings
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            filtered_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return filtered_matches[0] if filtered_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
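

# Added sketch, not in the original test: a minimal hand-rolled inverse of the
# same tag scheme for a flat, single-level document, to illustrate the idea
# behind token2json without downloading the real processor.
import re as _re


def toy_token2json(tokens: str) -> dict:
    # Each key k is serialized as <s_k>value</s_k>; this only handles flat docs.
    return {k: v for k, v in _re.findall(r"<s_(.+?)>(.*?)</s_\1>", tokens)}


assert toy_token2json("<s_age>99</s_age>") == {"age": "99"}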
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=False, help="The input data dir. Should contain data files for WikiText.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=False, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ), )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=False, help="The output directory where the final fine-tuned model is stored.", )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ), )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set", )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner", )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ), )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ), )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration", )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True)
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )
if __name__ == "__main__":
main()
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Dynamic-programming check whether some subset of `arr` sums to `required_sum`."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
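
    # Added usage sketch, not in the original file: {4, 5} sums to 9, while no
    # subset of these values reaches 30 (the values below 34 sum to only 26).
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
    assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)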
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = 'facebook/opt-350m'

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)
        prompts = [
            'Today is a beautiful day and I want to',
            'In the city of',
            'Paris is the capital of France and',
            'Computers and mobile phones have taken',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors='tf', padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm(self):
        model_id = 'facebook/opt-125m'
        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of New York, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = 'facebook/opt-350m'
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'])
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
            'Today, I was in the middle of a conversation with a friend about the',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
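    # Why left padding above (explanatory note, not from the original file):
    # decoder-only models like OPT continue generating from the last token of the
    # row, so right padding would make them continue after pad tokens. With
    # tokenizer.padding_side = 'left' the real prompt tokens sit at the end of
    # each row, and batched generation matches the unbatched outputs checked here.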
    def test_generation_post_attn_layer_norm(self):
        model_id = 'facebook/opt-350m'
        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
def factorial(num):
    """Find the factorial of the given number num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number):
    """Split the digits of number and add them together."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num=100):
    """Returns the sum of the digits in the number num!"""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
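# Quick sanity check (illustrative, not part of the original script):
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.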
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='camembert-base', revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf', sequences=sequences, )
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
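# The helper above normalizes the three accepted input shapes (comments only):
#   a single frame            -> [[frame]]   one video containing one frame
#   a flat list of frames     -> [frames]    one video
#   a list of lists of frames -> unchanged   already a batch of videos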
class VideoMAEImageProcessor(BaseImageProcessor):
    # Class name reconstructed from the transform pipeline below; the original
    # dump had an unreadable placeholder name.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
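# Usage sketch (illustrative only; `frames` stands for any list of PIL images
# or numpy arrays, and the class name follows the reconstruction above):
#
#     processor = VideoMAEImageProcessor()
#     batch = processor(frames, return_tensors='np')  # __call__ dispatches to preprocess
#     # batch['pixel_values'] is channels-first, roughly
#     # (num_videos, num_frames, 3, 224, 224) with the default crop size.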
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string')}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content']), sorted(['foo', 'bar', 'foobar']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, snr=0.15, sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5, correct_steps=1, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample, timestep=None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True, ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True, ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps, ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
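# Minimal predictor-corrector usage sketch (illustrative only; `model_output`
# and `sample` are placeholders for a score network's output and the current
# latent, not part of this module):
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=2000)
#     scheduler.set_sigmas(num_inference_steps=2000)
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             sample = scheduler.step_correct(model_output, sample).prev_sample
#         sample = scheduler.step_pred(model_output, t, sample).prev_sample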
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__lowercase : Dict = TypeVar('''T''')
class LRUCache(Generic[T]):
    """Page replacement using the Least Recently Used (LRU) algorithm."""

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and map for the keys; the cache is sized to n."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self, x: T) -> None:
        """Looks for a page in the cache store; evicts the least recently
        used key when the store is full, then moves x to the front."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Prints all the elements in the store."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
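    # Walkthrough of the demo above (comments only): after refer('A'), 2, 3, 'A'
    # the store reads ['A', 3, 2]; refer(4) fills it to [4, 'A', 3, 2]; refer(5)
    # evicts the least recently used key (2), giving [5, 4, 'A', 3] -- exactly
    # what the final assert checks.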
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
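# The pattern above is the standard transformers lazy-import idiom: the module
# replaces itself in sys.modules with a _LazyModule that resolves names from
# _import_structure on first attribute access, so importing the package stays
# cheap even when heavy torch-backed symbols are listed. A minimal sketch of
# the idea (illustrative, not the actual _LazyModule implementation):
#
#     class _LazyModule(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = self._name_to_module[name]  # derived from _import_structure
#             return getattr(importlib.import_module("." + submodule, __package__), name)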
'''simple docstring'''
def longest_distance(graph):
    """Prints the longest distance (counted in vertices) in a DAG,
    computed with a Kahn-style topological traversal."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
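# For this graph the call prints 5: the longest chain of vertices is
# 0 -> 2 -> 5 -> 6 -> 7, and each vertex contributes 1 to the distance.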
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark the decorated function with the single key it handles."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Mark the decorated function with all the keys it handles."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Reads one key press and dispatches to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
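# Usage sketch (illustrative; `BulletMenu` is a hypothetical consumer, not part
# of this module): decorate methods with the keys they react to, then register
# the class so KeyHandler wires up a key -> method dispatch table.
#
#     @register
#     class BulletMenu:
#         @mark(KEYMAP["up"])
#         def move_up(cls):
#             ...
#
#         @mark_multiple(KEYMAP["enter"], KEYMAP["space"])
#         def select(cls):
#             ...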
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
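# Example invocation (script name and paths are illustrative placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin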
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
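# Worked example (comments only): with pad_token_id=0 and
# decoder_start_token_id=0, shift_tokens_right([[5, 6, 7]], 0, 0) yields
# [[0, 5, 6]] -- everything moves one slot to the right, the start token fills
# position 0, and any label set to -100 would be replaced by the pad id.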
class FlaxMTaModel(FlaxTaModel):
    r"""This class overrides the Flax T5 model with the mT5 config."""

    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    r"""This class overrides the Flax T5 encoder model with the mT5 config."""

    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    r"""This class overrides the Flax T5 conditional-generation model with the mT5 config."""

    model_type = "mt5"
    config_class = MTaConfig
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]')
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]')
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512)
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128)
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['article', 'highlights'], )
        train_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['article', 'highlights'], )
        val_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy='steps', do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
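# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original script): what `accelerator.accumulate`
# does conceptually. Gradients from N micro-batches are summed before a single
# optimizer step, which approximates training with an N-times larger batch.
# The plain-PyTorch version below makes the loss scaling explicit; all names
# here are local to this example.
import torch


def manual_accumulation_step(model, optimizer, micro_batches, accumulation_steps):
    optimizer.zero_grad()
    for batch in micro_batches[:accumulation_steps]:
        loss = model(**batch).loss
        # Divide so the summed gradient matches the mean over the large batch.
        (loss / accumulation_steps).backward()
    optimizer.step()
# ---------------------------------------------------------------------------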
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
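# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file), assuming the
# `YolosConfig` / `YolosOnnxConfig` classes above: build a model config, then
# inspect the ONNX export metadata it advertises. This block never runs when
# the file is imported as part of a package.
if __name__ == "__main__":
    _cfg = YolosConfig(num_detection_tokens=100)
    _onnx_cfg = YolosOnnxConfig(_cfg)
    print(_onnx_cfg.inputs)               # OrderedDict with the 'pixel_values' dynamic axes
    print(_onnx_cfg.atol_for_validation)  # 1e-4 tolerance used when validating the export
# ---------------------------------------------------------------------------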
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> str:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = (self.model_tester.min_size,) * 2
UpperCamelCase_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCamelCase_ = self.model_tester.get_config()
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _UpperCAmelCase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCamelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# masks_queries_logits
UpperCamelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCamelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase )
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
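# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original tests): how random universal-
# segmentation targets like the tester's can be built stand-alone. `mask_labels`
# is one binary mask per (batch, label) pair and `class_labels` holds integer
# class ids; the shapes mirror `prepare_config_and_inputs` above, and all names
# are local to this example.
import torch

batch_size, num_labels, height, width = 2, 4, 32, 32
mask_labels = (torch.rand(batch_size, num_labels, height, width) > 0.5).float()
class_labels = (torch.rand(batch_size, num_labels) > 0.5).long()
assert mask_labels.shape == (batch_size, num_labels, height, width)
assert class_labels.shape == (batch_size, num_labels)
# ---------------------------------------------------------------------------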
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
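# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file), assuming the
# `VisionEncoderDecoderConfig` class above: a composite config is built from
# separately constructed encoder/decoder configs, and
# `from_encoder_decoder_configs` flips the decoder into cross-attention mode.
# The 'vit'/'bert' model types are example choices, not requirements.
#
#   from transformers import AutoConfig, VisionEncoderDecoderConfig
#   encoder_cfg = AutoConfig.for_model("vit")
#   decoder_cfg = AutoConfig.for_model("bert")
#   cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention
# ---------------------------------------------------------------------------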
import os


def solution():
    """Find the maximum total moving from the top of the triangle in triangle.txt to the bottom."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Dynamic programming: each cell accumulates the best total reachable from the row above.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
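# ---------------------------------------------------------------------------
# Illustrative worked example (not from the original file): the same dynamic
# programme on an in-memory triangle, so the recurrence is easy to check by
# hand. For the triangle below the best path is 3 -> 7 -> 4 -> 9 = 23.
def max_path_total(triangle):
    rows = [row[:] for row in triangle]
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            left = rows[i - 1][j - 1] if j > 0 else 0
            right = rows[i - 1][j] if j < len(rows[i - 1]) else 0
            rows[i][j] += max(left, right)
    return max(rows[-1])


assert max_path_total([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23
# ---------------------------------------------------------------------------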
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: the speed of sound in a fluid is
    c = sqrt(K / rho), with bulk modulus K and density rho.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
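# ---------------------------------------------------------------------------
# Worked example (added for illustration): for water at roughly 998 kg/m^3 with
# a bulk modulus of about 2.15e9 Pa, c = sqrt(2.15e9 / 998) ~ 1468 m/s, close
# to the commonly quoted speed of sound in water.
assert abs(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) - 1467.7) < 1.0
# ---------------------------------------------------------------------------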
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
] , )
UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
# convert model to fp16
pipe.model.half()
UpperCamelCase_ = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = fill_masker.tokenizer
UpperCamelCase_ = fill_masker.model
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
with self.assertRaises(_UpperCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_UpperCAmelCase ):
fill_masker('This is' )
self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase )
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Call argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Score equivalence
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs]
UpperCamelCase_ = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets_subset = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
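# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original tests): the output contract the
# assertions above keep checking. Every fill-mask prediction is a dict with
# exactly these four keys; a tiny validator makes the expected shape explicit.
def looks_like_fill_mask_prediction(item) -> bool:
    return (
        isinstance(item, dict)
        and isinstance(item.get("sequence"), str)
        and isinstance(item.get("score"), float)
        and isinstance(item.get("token"), int)
        and isinstance(item.get("token_str"), str)
    )


assert looks_like_fill_mask_prediction(
    {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}
)
# ---------------------------------------------------------------------------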
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
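# ---------------------------------------------------------------------------
# Illustrative note (added for this example): a rough entropy estimate for
# `password_generator`. With ~94 ASCII symbols (letters + digits + punctuation)
# each position contributes log2(94) ~ 6.55 bits, so an 8-character password
# carries about 52 bits of entropy; length matters far more than symbol variety.
import math

alphabet_size = len(ascii_letters + digits + punctuation)  # 94 on ASCII
entropy_bits = 8 * math.log2(alphabet_size)
assert 52 < entropy_bits < 53
# ---------------------------------------------------------------------------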
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_sag_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images
        assert image.shape == (1, 512, 768, 3)
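# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original tests): the "expected slice"
# pattern used above. Instead of pinning every pixel, the integration tests
# compare a tiny corner slice of the generated image against hard-coded
# reference values within a loose tolerance; the numbers below are made up
# purely for this illustration.
import numpy as np

image = np.full((1, 512, 512, 3), 0.2)  # stand-in for a generated image
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.full((3, 3), 0.2)
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 5e-2
# ---------------------------------------------------------------------------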
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case : Tuple = None
if self.use_input_mask:
snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : int = None
if self.use_labels:
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case_ ( self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
snake_case : Dict = True
return config, input_ids, input_mask, token_labels
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = GPTNeoXModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case : str = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase )
snake_case : str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Union[str, Any] = True
snake_case : Optional[Any] = GPTNeoXModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case : Union[str, Any] = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case : List[Any] = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = self.num_labels
snake_case : Dict = GPTNeoXForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case : Optional[int] = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Any = self.num_labels
snake_case : int = GPTNeoXForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case : Optional[Any] = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Any = self.num_labels
snake_case : Optional[int] = GPTNeoXForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case : List[str] = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : Optional[int] = GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# first forward pass
snake_case : List[str] = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ,use_cache=_UpperCAmelCase )
snake_case : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
snake_case : Optional[int] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
snake_case : str = torch.cat([input_mask, next_mask] ,dim=-1 )
snake_case : List[str] = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ,output_hidden_states=_UpperCAmelCase )
snake_case : Union[str, Any] = output_from_no_past["""hidden_states"""][0]
snake_case : Tuple = model(
_UpperCAmelCase ,attention_mask=_UpperCAmelCase ,past_key_values=_UpperCAmelCase ,output_hidden_states=_UpperCAmelCase ,)["""hidden_states"""][0]
# select random slice
snake_case : Dict = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
snake_case : int = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ,atol=1E-3 ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : List[str] = config_and_inputs
snake_case : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _A ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
__lowerCamelCase : str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : Optional[Any] = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : str = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : Any = False
__lowerCamelCase : int = False
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = GPTNeoXModelTester(self )
snake_case : Optional[int] = ConfigTester(self ,config_class=_UpperCAmelCase ,hidden_size=64 ,num_attention_heads=8 )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
snake_case , snake_case , snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case : Dict = None
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_UpperCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[Any] = ids_tensor([1, 10] ,config.vocab_size )
snake_case : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case : Optional[Any] = GPTNeoXModel(_UpperCAmelCase )
original_model.to(_UpperCAmelCase )
original_model.eval()
snake_case : Optional[Any] = original_model(_UpperCAmelCase ).last_hidden_state
snake_case : str = original_model(_UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case : str = {"""type""": scaling_type, """factor""": 10.0}
snake_case : Optional[Any] = GPTNeoXModel(_UpperCAmelCase )
scaled_model.to(_UpperCAmelCase )
scaled_model.eval()
snake_case : List[str] = scaled_model(_UpperCAmelCase ).last_hidden_state
snake_case : Union[str, Any] = scaled_model(_UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ,atol=1E-5 ) )
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
snake_case : Any = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_UpperCAmelCase )
snake_case : Tuple = tokenizer("""My favorite food is""" ,return_tensors="""pt""" ).to(_UpperCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case : Union[str, Any] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure"""
snake_case : str = model.generate(**_UpperCAmelCase ,do_sample=_UpperCAmelCase ,max_new_tokens=20 )
snake_case : Tuple = tokenizer.batch_decode(_UpperCAmelCase )[0]
self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase )
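# Stand-alone sketch mirroring the slow generation test above (added for
# illustration; assumes network access to the EleutherAI/pythia-410m-deduped
# checkpoint and is not part of the test suite):
#
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
#     model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
#     inputs = tokenizer("My favorite food is", return_tensors="pt")
#     output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
#     print(tokenizer.batch_decode(output_ids)[0])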
| 36 |
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap with a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos: int, nodea_pos_other: int) -> None:
        # Swap the nodes at the given positions
        node_a = self.heap[nodea_pos][0]
        node_b = self.heap[nodea_pos_other][0]
        self.heap[nodea_pos], self.heap[nodea_pos_other] = (
            self.heap[nodea_pos_other],
            self.heap[nodea_pos],
        )
        self.position_map[node_a] = nodea_pos_other
        self.position_map[node_b] = nodea_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node_a: T, node_b: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node_a)
        self.add_node(node_b)
        self.connections[node_a][node_b] = weight
        self.connections[node_b][node_a] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
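# A minimal usage sketch (added for illustration; the triangle graph below is an
# invented example, not part of the original module). With these weights, both
# 'b' and 'c' attach directly to 'a':
if __name__ == "__main__":
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, parent = prims_algo(g)
    print(dist)    # {'a': 0, 'b': 3, 'c': 5}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'a'}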
| 23 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
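# Usage sketch (added for illustration, not part of the original file): with the
# _LazyModule pattern above, the torch-backed submodule is only imported on first
# attribute access, so importing the package stays cheap. Assuming a transformers
# version that ships GPT BigCode:
#
#     from transformers import GPTBigCodeConfig  # resolved lazily via _import_structure
#     config = GPTBigCodeConfig()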
| 331 |
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page replacement cache that discards the least recently used item first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("""A""")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("""A""")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
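    # Trace of the calls above (added for clarity): after refer(5) the capacity-4
    # cache is full, so the least recently used key (2) is evicted from the right
    # end of the deque, leaving [5, 4, 'A', 3] ordered from most to least recent.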
| 23 | 0 |
from math import factorial


def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid,
    i.e. the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    # Integer division keeps the result exact for large n (float division would not).
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 326 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU, also known as swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
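    # Quick numeric check (added for illustration): SiLU passes large positive
    # inputs through almost unchanged and squashes large negatives toward zero.
    print(sigmoid_linear_unit(np.array([-2.0, 0.0, 2.0])))
    # -> [-0.23840584  0.          1.76159416]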
| 23 | 0 |