| Column | Type | Range |
|---|---|---|
| code | string | lengths 81 to 54k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
"""Find the sum of the only eleven truncatable primes (Project Euler style)."""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return n together with every left- and right-truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
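

# For example (worked by hand): list_truncated_nums(9137)
# returns [9137, 137, 913, 37, 91, 7, 9].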


def validate(n: int) -> bool:
    """Quick pre-filter: a long candidate must start and end in a 3-digit prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first ``count`` primes that stay prime under every truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
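

# There are exactly eleven two-sided truncatable primes; their sum, 748317,
# is the answer to Project Euler problem 37.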


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
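

# Illustration (assumed shapes): with input_ids = [[5, 6, 7]] and decoder_start_token_id = 0,
# shift_tokens_right returns [[0, 5, 6]]; any -100 label-padding sentinels that get shifted in
# are replaced with pad_token_id.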


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
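#
# For reference, this cartesian product is exactly what `itertools.product` computes. A
# minimal sketch (variable names here are illustrative; the script builds its run matrix
# the same way further below):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   variations = [" ".join(v) for v in itertools.product(*dims)]  # 6 combinations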
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)"""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line, quoted and wrapped to max_width chars."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
"""Forward (explicit) Euler method for solving ordinary differential equations."""
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) by stepping y[k + 1] = y[k] + h * f(x[k], y[k])."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
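

# A minimal usage sketch (illustrative, not part of the original module): integrate
# dy/dx = y from x = 0 to x = 1 with y(0) = 1; the result approximates e.
#
#   y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(y[-1])  # ~2.7048; forward Euler slightly underestimates e ~= 2.7183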


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
"""Find the 10001st prime (Project Euler problem 7)."""
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime, testing 2 and 3 first and then only odd candidates."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
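

# Known result: solution() == 104743, the 10,001st prime (Project Euler problem 7).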


if __name__ == "__main__":
    print(f"{solution() = }")
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
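

# Illustrative usage (not part of the original module): the width multiplier is the
# main knob distinguishing the released checkpoints.
#
#   config = MobileNetV2Config(depth_multiplier=1.4)
#   assert config.hidden_act == "relu6"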


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
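

# Illustrative usage (not part of the original module): construct a config and
# check a default value.
#
#   config = Data2VecTextConfig(hidden_size=512, num_attention_heads=8)
#   assert config.position_embedding_type == "absolute"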


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from ..utils import DummyObject, requires_backends


# NOTE: the concrete class names were stripped during preprocessing of this dump; the
# FlaxPlaceholder* names below are placeholders that preserve the file's structure
# (13 dummy objects guarding the optional flax backend). The from_config/from_pretrained
# classmethod names follow the standard dummy-object pattern and are an assumption.
class FlaxPlaceholder01(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder02(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder03(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder04(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder05(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder06(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder07(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder08(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder09(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder10(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder11(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder12(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPlaceholder13(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # Run only the requested layer, so the caller can exit early once the
        # patience criterion is met.
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: collect per-layer logits so every internal classifier gets a loss.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop as soon as `patience` consecutive
            # layers agree on the prediction (or stay within the regression threshold).
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
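

# Minimal inference-time usage sketch (illustrative; only the classes defined in this
# module and the standard from_pretrained API are assumed):
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#   model.bert.set_patience(3)  # exit once 3 consecutive layers agree on the prediction
#   model.bert.reset_stats()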


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # Later (deeper) layers get proportionally larger loss weights.
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
from manim import *


class BigModelInferenceScene(Scene):
    # NOTE: this scene was recovered from an obfuscated dump. The class name, the
    # direction constants (UP/DOWN/LEFT/RIGHT) and the colors were stripped during
    # preprocessing and are reconstructed best-effort; sizes, positions and the
    # animation sequence follow the original.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(YELLOW, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5),
                MoveToTarget(input, run_time=0.5),
                FadeIn(a_c, run_time=0.5),
                lag_ratio=0.2,
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 680 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( input_num : int ) -> int:
    if not isinstance(input_num , int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
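# Worked example (illustrative): __UpperCAmelCase(28) sums the proper divisors
# 1 + 2 + 4 + 7 + 14 == 28, so 28 is a perfect number; __UpperCAmelCase(7) returns 1.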
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 680 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
def __init__( self : Optional[Any] , a_ : Union[str, Any] , a_ : Union[str, Any]=7 , a_ : List[str]=3 , a_ : List[str]=18 , a_ : Union[str, Any]=30 , a_ : List[str]=400 , a_ : int=True , a_ : List[Any]=None , a_ : str=True , ):
"""simple docstring"""
__snake_case = size if size is not None else {"height": 18, "width": 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = apply_ocr
def A ( self : Dict ):
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class SCREAMING_SNAKE_CASE__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def A ( self : int ):
"""simple docstring"""
__snake_case = LayoutLMvaImageProcessingTester(self )
@property
def A ( self : int ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "apply_ocr" ) )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase__ )
self.assertIsInstance(encoding.boxes , lowerCAmelCase__ )
# Test batched
__snake_case = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__snake_case = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__snake_case = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
__snake_case = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
__snake_case = Image.open(ds[0]["file"] ).convert("RGB" )
__snake_case = image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__snake_case = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
__snake_case = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase__ )
self.assertListEqual(encoding.boxes , lowerCAmelCase__ )
# with apply_OCR = False
__snake_case = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ )
__snake_case = image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 512,
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_LYRIC_TOKENS_SIZES
__SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , a_ : Dict , a_ : Optional[Any] , a_ : str , a_ : Optional[Any]=["v3", "v2", "v2"] , a_ : Union[str, Any]=512 , a_ : Dict=5 , a_ : List[str]="<|endoftext|>" , **a_ : List[str] , ):
"""simple docstring"""
__snake_case = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
super().__init__(
unk_token=UpperCAmelCase_ , n_genres=UpperCAmelCase_ , version=UpperCAmelCase_ , max_n_lyric_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
__snake_case = version
__snake_case = max_n_lyric_tokens
__snake_case = n_genres
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
__snake_case = json.load(UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
__snake_case = json.load(UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
__snake_case = json.load(UpperCAmelCase_ )
__snake_case = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
__snake_case = oov.replace(r"\-'" , r"\-+'" )
__snake_case = regex.compile(UpperCAmelCase_ )
__snake_case = {v: k for k, v in self.artists_encoder.items()}
__snake_case = {v: k for k, v in self.genres_encoder.items()}
__snake_case = {v: k for k, v in self.lyrics_encoder.items()}
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def A ( self : Tuple ):
"""simple docstring"""
        return dict(self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder )
def A ( self : Dict , a_ : Tuple , a_ : str , a_ : List[str] ):
"""simple docstring"""
__snake_case = [self.artists_encoder.get(UpperCAmelCase_ , 0 ) for artist in list_artists]
for genres in range(len(UpperCAmelCase_ ) ):
__snake_case = [self.genres_encoder.get(UpperCAmelCase_ , 0 ) for genre in list_genres[genres]]
__snake_case = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__snake_case = [[self.lyrics_encoder.get(UpperCAmelCase_ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def A ( self : Dict , a_ : List[str] ):
"""simple docstring"""
return list(UpperCAmelCase_ )
def A ( self : List[str] , a_ : str , a_ : int , a_ : List[Any] , **a_ : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self.prepare_for_tokenization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
__snake_case = self._tokenize(UpperCAmelCase_ )
return artist, genre, lyrics
def A ( self : List[str] , a_ : str , a_ : str , a_ : str , a_ : bool = False ):
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__snake_case = artists[idx].lower()
__snake_case = [genres[idx].lower()]
else:
__snake_case = self._normalize(artists[idx] ) + ".v2"
__snake_case = [
self._normalize(UpperCAmelCase_ ) + ".v2" for genre in genres[idx].split("_" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__snake_case = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
__snake_case = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
__snake_case = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase_ ) )}
__snake_case = 0
__snake_case = len(UpperCAmelCase_ ) + 1
__snake_case = self.vocab
__snake_case = {v: k for k, v in self.vocab.items()}
__snake_case = ""
else:
__snake_case = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
__snake_case = self._run_strip_accents(UpperCAmelCase_ )
__snake_case = lyrics.replace("\\" , "\n" )
__snake_case = self.out_of_vocab.sub("" , UpperCAmelCase_ ), [], []
return artists, genres, lyrics
def A ( self : Tuple , a_ : List[str] ):
"""simple docstring"""
__snake_case = unicodedata.normalize("NFD" , UpperCAmelCase_ )
__snake_case = []
for char in text:
__snake_case = unicodedata.category(UpperCAmelCase_ )
if cat == "Mn":
continue
output.append(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
def A ( self : List[Any] , a_ : str ):
"""simple docstring"""
__snake_case = (
[chr(UpperCAmelCase_ ) for i in range(ord("a" ) , ord("z" ) + 1 )]
+ [chr(UpperCAmelCase_ ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
+ [chr(UpperCAmelCase_ ) for i in range(ord("0" ) , ord("9" ) + 1 )]
+ ["."]
)
__snake_case = frozenset(UpperCAmelCase_ )
__snake_case = re.compile(r"_+" )
__snake_case = "".join([c if c in accepted else "_" for c in text.lower()] )
__snake_case = pattern.sub("_" , UpperCAmelCase_ ).strip("_" )
return text
def A ( self : List[Any] , a_ : List[str] ):
"""simple docstring"""
return " ".join(UpperCAmelCase_ )
def A ( self : Tuple , a_ : str , a_ : Optional[Union[str, TensorType]] = None , a_ : bool = False ):
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__snake_case = TensorType(UpperCAmelCase_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
import tensorflow as tf
__snake_case = tf.constant
__snake_case = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
import torch
__snake_case = torch.tensor
__snake_case = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
import jax.numpy as jnp # noqa: F811
__snake_case = jnp.array
__snake_case = _is_jax
else:
__snake_case = np.asarray
__snake_case = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__snake_case = [inputs]
if not is_tensor(UpperCAmelCase_ ):
__snake_case = as_tensor(UpperCAmelCase_ )
except: # noqa E722
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
return inputs
def __call__( self : Any , a_ : List[str] , a_ : List[str] , a_ : List[Any]="" , a_ : List[Any]="pt" ):
"""simple docstring"""
__snake_case = [0, 0, 0]
__snake_case = [artist] * len(self.version )
__snake_case = [genres] * len(self.version )
__snake_case , __snake_case , __snake_case = self.tokenize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
__snake_case , __snake_case , __snake_case = self._convert_token_to_id(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
__snake_case = [-INFINITY] * len(full_tokens[-1] )
__snake_case = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCAmelCase_ )
for i in range(len(self.version ) )
]
return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
def A ( self : Optional[Any] , a_ : str , a_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCAmelCase_ ) )
__snake_case = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCAmelCase_ ) )
__snake_case = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCAmelCase_ ) )
return (artists_file, genres_file, lyrics_file)
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Dict , a_ : List[Any] ):
"""simple docstring"""
__snake_case = self.artists_decoder.get(UpperCAmelCase_ )
__snake_case = [self.genres_decoder.get(UpperCAmelCase_ ) for genre in genres_index]
__snake_case = [self.lyrics_decoder.get(UpperCAmelCase_ ) for character in lyric_index]
return artist, genres, lyrics
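# Usage sketch (hedged): this class mirrors transformers' JukeboxTokenizer, so typical use,
# with an illustrative checkpoint name, would look like:
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
# where encoding["input_ids"] holds one tensor per prior in `self.version` ("v3", "v2", "v2").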
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
        # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
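# Usage sketch (hedged): in `accelerate` these helpers are exposed as save_fsdp_model /
# load_fsdp_model / save_fsdp_optimizer / load_fsdp_optimizer and are normally driven by
# Accelerator.save_state / Accelerator.load_state rather than called directly, e.g.
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0)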
| 680 | 0 |
'''simple docstring'''
def triangle_number_generator():
    for n in range(1 , 1_00_00_00 ):
        yield n * (n + 1) // 2
def count_divisors(n : int ) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
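# Worked example: count_divisors(28) == 6, since 28 == 2**2 * 7 and the divisor count is
# (2 + 1) * (1 + 1) == 6 (the divisors being 1, 2, 4, 7, 14 and 28).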
def solution() -> int:
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_00 )
if __name__ == "__main__":
print(solution())
| 703 |
'''simple docstring'''
def fizz_buzz(number : int , iterations : int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be\nan integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
            out += str(number )
# print(out)
number += 1
out += " "
return out
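# Example: fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (every entry, including the words, is followed by a single space).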
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Optional[int] , a_ : bool = True , a_ : Dict[str, int] = None , a_ : float = None , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , **a_ : str , ):
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
__snake_case = size if size is not None else {'shortest_edge': 384}
__snake_case = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__snake_case = do_resize
__snake_case = size
# Default value set here for backwards compatibility where the value in config is None
__snake_case = crop_pct if crop_pct is not None else 224 / 256
__snake_case = resample
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : Tuple , a_ : np.ndarray , a_ : Dict[str, int] , a_ : float , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : str , ):
"""simple docstring"""
__snake_case = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
__snake_case = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__snake_case = int(shortest_edge / crop_pct )
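            # Worked example (illustrative): with shortest_edge=224 and the default
            # crop_pct of 224/256, the resize target is int(224 / (224 / 256)) == 256, so the
            # image is resized to a 256px shortest edge and then center-cropped to 224x224.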
__snake_case = get_resize_output_image_size(UpperCamelCase_ , size=UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__snake_case = resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=UpperCamelCase_ , size=(shortest_edge, shortest_edge) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
UpperCamelCase_ , size=(shortest_edge, shortest_edge) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def A ( self : Dict , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Any , ):
"""simple docstring"""
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def A ( self : Any , a_ : np.ndarray , a_ : Union[float, List[float]] , a_ : Union[float, List[float]] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Dict , ):
"""simple docstring"""
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def A ( self : List[str] , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : float = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : int , ):
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = crop_pct if crop_pct is not None else self.crop_pct
__snake_case = resample if resample is not None else self.resample
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__snake_case = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__snake_case = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , crop_pct=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__snake_case = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__snake_case = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 704 |
'''simple docstring'''
def twos_complement(number : int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SequenceFeatureExtractor ):
__SCREAMING_SNAKE_CASE = ["""input_values""", """padding_mask"""]
def __init__( self : Any , a_ : int = 1 , a_ : int = 24_000 , a_ : float = 0.0 , a_ : float = None , a_ : float = None , **a_ : List[Any] , ):
"""simple docstring"""
super().__init__(feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , **__lowerCamelCase )
__snake_case = chunk_length_s
__snake_case = overlap
@property
def A ( self : Tuple ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A ( self : List[str] ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
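        # Worked example (illustrative): with sampling_rate=24_000, chunk_length_s=1.0 and
        # overlap=0.5, chunk_length == 24_000 samples and chunk_stride ==
        # max(1, int(0.5 * 24_000)) == 12_000, i.e. consecutive chunks overlap by half.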
def __call__( self : str , a_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a_ : Optional[Union[bool, str, PaddingStrategy]] = None , a_ : Optional[bool] = False , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[int] = None , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
__snake_case = True
__snake_case = bool(
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
__snake_case = [np.asarray(__lowerCamelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
__snake_case = np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__snake_case = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case = [np.asarray(__lowerCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__lowerCamelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
__snake_case = None
__snake_case = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__snake_case = min(array.shape[0] for array in raw_audio )
__snake_case = int(np.floor(max_length / self.chunk_stride ) )
__snake_case = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__snake_case = max(array.shape[0] for array in raw_audio )
__snake_case = int(np.ceil(max_length / self.chunk_stride ) )
__snake_case = (nb_step - 1) * self.chunk_stride + self.chunk_length
__snake_case = "max_length"
else:
__snake_case = input_values
# normal padding on batch
if padded_inputs is None:
__snake_case = self.pad(
__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , padding=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
if padding:
__snake_case = padded_inputs.pop("attention_mask" )
__snake_case = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
__snake_case = example[..., None]
input_values.append(example.T )
__snake_case = input_values
if return_tensors is not None:
__snake_case = padded_inputs.convert_to_tensors(__lowerCamelCase )
return padded_inputs
| 705 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        # n & (n - 1) clears the lowest set bit, so the loop runs once per set bit
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
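# Both implementations agree on every input, e.g. 25 == 0b11001 has three set bits, so
# get_set_bits_count_using_brian_kernighans_algorithm(25) == 3 ==
# get_set_bits_count_using_modulo_operator(25).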
def benchmark() -> None:
    def do_benchmark(number : int ) -> None:
        setup = "import __main__ as z"
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
        print(F'''timeit() runs in {timing} seconds''' )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
from typing import Any
def mode(input_list : list ) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
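# Examples: mode([2, 2, 3]) == [2]; mode([1, 2, 2, 3, 3]) == [2, 3] (all tied values are
# returned, sorted); mode([]) == [].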
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    @cached_property
    def teacher_config( self : Union[str, Any] ):
        """simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART )
    def A ( self : str ):
        """simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def A ( self : Optional[Any] ):
        """simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
    def A ( self : Dict ):
        """simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def A ( self : Optional[int] ):
        """simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def A ( self : Dict ):
        """simple docstring"""
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 680 | 0 |
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE__ ( Scene ):
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = Rectangle(height=0.5 , width=0.5 )
__snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__snake_case = Rectangle(height=0.25 , width=0.25 )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__snake_case = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__snake_case = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
__snake_case = Text("CPU" , font_size=24 )
__snake_case = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
__snake_case = [mem.copy() for i in range(4 )]
__snake_case = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__snake_case = Text("GPU" , font_size=24 )
__snake_case = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.move_to([-1, -1, 0] )
self.add(_lowercase )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__snake_case = Text("Model" , font_size=24 )
__snake_case = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.add(_lowercase )
__snake_case = []
__snake_case = []
for i, rect in enumerate(_lowercase ):
__snake_case = fill.copy().set_fill(_lowercase , opacity=0.8 )
target.move_to(_lowercase )
model_arr.append(_lowercase )
__snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase )
__snake_case = [meta_mem.copy() for i in range(6 )]
__snake_case = [meta_mem.copy() for i in range(6 )]
__snake_case = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__snake_case = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__snake_case = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
__snake_case = Text("Disk" , font_size=24 )
__snake_case = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowercase , _lowercase )
__snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__snake_case = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowercase , _lowercase )
__snake_case = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowercase )
__snake_case = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase ) )
__snake_case = Square(0.3 )
input.set_fill(_lowercase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowercase , buff=0.5 )
self.play(Write(_lowercase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowercase , buff=0.02 )
self.play(MoveToTarget(_lowercase ) )
self.play(FadeOut(_lowercase ) )
__snake_case = Arrow(start=_lowercase , end=_lowercase , color=_lowercase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowercase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__snake_case = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) )
__snake_case = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_lowercase ) , Circumscribe(model_arr[0] , color=_lowercase , **_lowercase ) , Circumscribe(model_cpu_arr[0] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__snake_case = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _lowercase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__snake_case = AnimationGroup(
FadeOut(_lowercase , run_time=0.5 ) , MoveToTarget(_lowercase , run_time=0.5 ) , FadeIn(_lowercase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowercase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__snake_case = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowercase ) , Circumscribe(cpu_left_col_base[i] , **_lowercase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , Circumscribe(model_arr[i + 1] , color=_lowercase , **_lowercase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowercase , **_lowercase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__snake_case = a_c
__snake_case = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_lowercase ) , FadeOut(_lowercase , run_time=0.5 ) , )
__snake_case = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) , MoveToTarget(_lowercase ) )
self.wait()
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
if __name__ == "__main__":
main()
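# A minimal invocation sketch. --task, --gpus, --do_predict and --output_dir are
# defined or referenced above; flags such as --model_name_or_path and --data_dir
# are assumed to come from lightning_base's add_generic_args, and the script
# name is illustrative:
#
#     python run_glue_pl.py --task mrpc --model_name_or_path bert-base-cased \
#         --data_dir ./glue_data/MRPC --output_dir ./results --gpus 1 --do_predict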
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> list:
if len(__A ) != 2 or len(a[0] ) != 2 or len(__A ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
__snake_case = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> Optional[Any]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__A ) )
]
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> Any:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__A ) )
]
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple[list, list, list, list]:
if len(__A ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
__snake_case = len(__A )
__snake_case = matrix_length // 2
__snake_case = [[a[i][j] for j in range(__A , __A )] for i in range(__A )]
__snake_case = [
[a[i][j] for j in range(__A , __A )] for i in range(__A , __A )
]
__snake_case = [[a[i][j] for j in range(__A )] for i in range(__A )]
__snake_case = [[a[i][j] for j in range(__A )] for i in range(__A , __A )]
return top_left, top_right, bot_left, bot_right
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple[int, int]:
return len(__A ), len(matrix[0] )
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> None:
print("\n".join(str(__A ) for line in matrix ) )
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> list:
if matrix_dimensions(__A ) == (2, 2):
return default_matrix_multiplication(__A , __A )
__snake_case = split_matrix(__A )
__snake_case = split_matrix(__A )
__snake_case = actual_strassen(__A , matrix_subtraction(__A , __A ) )
__snake_case = actual_strassen(matrix_addition(__A , __A ) , __A )
__snake_case = actual_strassen(matrix_addition(__A , __A ) , __A )
__snake_case = actual_strassen(__A , matrix_subtraction(__A , __A ) )
__snake_case = actual_strassen(matrix_addition(__A , __A ) , matrix_addition(__A , __A ) )
__snake_case = actual_strassen(matrix_subtraction(__A , __A ) , matrix_addition(__A , __A ) )
__snake_case = actual_strassen(matrix_subtraction(__A , __A ) , matrix_addition(__A , __A ) )
__snake_case = matrix_addition(matrix_subtraction(matrix_addition(__A , __A ) , __A ) , __A )
__snake_case = matrix_addition(__A , __A )
__snake_case = matrix_addition(__A , __A )
__snake_case = matrix_subtraction(matrix_subtraction(matrix_addition(__A , __A ) , __A ) , __A )
# construct the new matrix from our 4 quadrants
__snake_case = []
for i in range(len(__A ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__A ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> list:
if matrix_dimensions(__A )[1] != matrix_dimensions(__A )[0]:
__snake_case = (
            '''Unable to multiply these matrices; please check the dimensions.\n'''
F'''Matrix A: {matrixa}\n'''
F'''Matrix B: {matrixa}'''
)
raise Exception(__A )
__snake_case = matrix_dimensions(__A )
__snake_case = matrix_dimensions(__A )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__snake_case = max(*__A , *__A )
__snake_case = int(math.pow(2 , math.ceil(math.loga(__A ) ) ) )
__snake_case = matrixa
__snake_case = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , __A ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __A ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __A ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__snake_case = actual_strassen(__A , __A )
# Removing the additional zeros
for i in range(0 , __A ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __A ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a : List[str] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a : List[Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
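# A quick sanity check against NumPy's plain matrix product is a useful
# companion to the demo above; a minimal sketch (runnable only if the
# top-level function keeps the `strassen` name used in the print call, and
# with the same two demo matrices mirrored from it):
#
#     import numpy as np
#     assert np.array_equal(
#         np.array(strassen(matrixa, matrixa)),
#         np.array(matrixa) @ np.array(matrixa),
#     )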
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
__snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__snake_case = dataset_size < in_memory_max_size
else:
__snake_case = False
__snake_case = is_small_dataset(_UpperCAmelCase )
assert result == expected
| 680 | 0 |
'''simple docstring'''
import pprint
import requests
a : Union[str, Any] = 'https://zenquotes.io/api'
def __UpperCAmelCase ( ) -> Optional[Any]:
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def __UpperCAmelCase ( ) -> Optional[Any]:
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
a : Dict = random_quotes()
pprint.pprint(response)
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
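# A minimal usage sketch (assuming the two helpers keep their upstream names,
# dodecahedron_surface_area and dodecahedron_volume, rather than the obfuscated
# ones above); the values follow directly from the formulas for edge length 5:
#
#     >>> round(dodecahedron_surface_area(5), 2)  # 3*sqrt(25 + 10*sqrt(5)) * 5**2
#     516.14
#     >>> round(dodecahedron_volume(5), 2)        # (15 + 7*sqrt(5))/4 * 5**3
#     957.89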
| 680 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = "swinv2"
__SCREAMING_SNAKE_CASE = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , a_ : List[str]=224 , a_ : Tuple=4 , a_ : Optional[Any]=3 , a_ : Optional[Any]=96 , a_ : List[Any]=[2, 2, 6, 2] , a_ : Union[str, Any]=[3, 6, 12, 24] , a_ : Any=7 , a_ : List[Any]=4.0 , a_ : Union[str, Any]=True , a_ : List[Any]=0.0 , a_ : int=0.0 , a_ : str=0.1 , a_ : Optional[int]="gelu" , a_ : Tuple=False , a_ : Optional[Any]=0.02 , a_ : Optional[int]=1e-5 , a_ : Union[str, Any]=32 , **a_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(UpperCamelCase_ )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__snake_case = (0, 0, 0, 0)
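# A minimal instantiation sketch using the public transformers API names
# (Swinv2Config / Swinv2Model) rather than the obfuscated class name above:
#
#     from transformers import Swinv2Config, Swinv2Model
#     config = Swinv2Config(image_size=256, window_size=8)
#     model = Swinv2Model(config)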
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
    # using the haversine formula: sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
    __snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_denominator)
# Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
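# A minimal usage sketch (assuming the upstream signature,
# lamberts_ellipsoidal_distance(lat1, lon1, lat2, lon2), returning metres):
#
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # roughly 254 km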
| 680 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
a : Any = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __UpperCAmelCase ( _UpperCAmelCase ) -> Optional[Any]:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
return max(metric_fn(_lowerCAmelCase , _lowerCAmelCase ) for gt in ground_truths )
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
__snake_case = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
__snake_case = []
if args.gold_data_mode == "qa":
__snake_case = pd.read_csv(_lowerCAmelCase , sep="\t" , header=_lowerCAmelCase )
for answer_list in data[1]:
__snake_case = ast.literal_eval(_lowerCAmelCase )
answers.append(_lowerCAmelCase )
else:
__snake_case = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
__snake_case = [[reference] for reference in references]
__snake_case = 0
for prediction, ground_truths in zip(_lowerCAmelCase , _lowerCAmelCase ):
total += 1
em += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
fa += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case = 1_00.0 * em / total
__snake_case = 1_00.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
__snake_case = args.k
__snake_case = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
__snake_case = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
__snake_case = 0
for hypo, reference in zip(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case = set(hypo.split("\t" )[:k] )
__snake_case = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__snake_case = 1_00.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
def strip_title(_UpperCAmelCase ):
if title.startswith("\"" ):
__snake_case = title[1:]
if title.endswith("\"" ):
__snake_case = title[:-1]
return title
__snake_case = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , )["input_ids"].to(args.device )
__snake_case = rag_model.rag.question_encoder(_lowerCAmelCase )
__snake_case = question_enc_outputs[0]
__snake_case = rag_model.retriever(
_lowerCAmelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
__snake_case = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__snake_case = []
for docs in all_docs:
__snake_case = [strip_title(_lowerCAmelCase ) for title in docs["title"]]
provenance_strings.append("\t".join(_lowerCAmelCase ) )
return provenance_strings
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
with torch.no_grad():
__snake_case = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )
__snake_case = inputs_dict.input_ids.to(args.device )
__snake_case = inputs_dict.attention_mask.to(args.device )
__snake_case = rag_model.generate( # rag_model overwrites generate
_lowerCAmelCase , attention_mask=_lowerCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_lowerCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__snake_case = rag_model.retriever.generator_tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
if args.print_predictions:
for q, a in zip(_lowerCAmelCase , _lowerCAmelCase ):
logger.info("Q: {} - A: {}".format(_lowerCAmelCase , _lowerCAmelCase ) )
return answers
def __UpperCAmelCase ( ) -> List[str]:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=_lowerCAmelCase , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=_lowerCAmelCase , choices=["exact", "compressed", "legacy"] , type=_lowerCAmelCase , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=_lowerCAmelCase , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=_lowerCAmelCase , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=_lowerCAmelCase , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=_lowerCAmelCase , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=_lowerCAmelCase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=_lowerCAmelCase , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=_lowerCAmelCase , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=_lowerCAmelCase , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=_lowerCAmelCase , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
__snake_case = parser.parse_args()
__snake_case = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def __UpperCAmelCase ( _UpperCAmelCase ) -> List[Any]:
__snake_case = {}
if args.model_type is None:
__snake_case = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
__snake_case = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
__snake_case = args.n_docs
if args.index_name is not None:
__snake_case = args.index_name
if args.index_path is not None:
__snake_case = args.index_path
else:
__snake_case = BartForConditionalGeneration
__snake_case = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , _lowerCAmelCase )
__snake_case = get_scores if args.eval_mode == "e2e" else get_precision_at_k
__snake_case = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(_lowerCAmelCase ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
__snake_case = RagRetriever.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__snake_case = model_class.from_pretrained(_lowerCAmelCase , retriever=_lowerCAmelCase , **_lowerCAmelCase )
model.retriever.init_retrieval()
else:
__snake_case = model_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
__snake_case = []
for line in tqdm(_lowerCAmelCase ):
questions.append(line.strip() )
if len(_lowerCAmelCase ) == args.eval_batch_size:
__snake_case = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write("\n".join(_lowerCAmelCase ) + "\n" )
preds_file.flush()
__snake_case = []
if len(_lowerCAmelCase ) > 0:
__snake_case = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write("\n".join(_lowerCAmelCase ) )
preds_file.flush()
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
a : Any = get_args()
main(args)
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__snake_case = int(args[4] )
__snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
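# A minimal invocation sketch (positional argument order per parse_args above:
# filename, spatial variance, intensity variance, kernel size; the script name
# is illustrative):
#
#     python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5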
| 680 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def A ( self : str ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A ( self : Dict ):
"""simple docstring"""
__snake_case = ort.SessionOptions()
__snake_case = False
return options
def A ( self : int ):
"""simple docstring"""
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
__snake_case = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
__snake_case = """A red cat sitting on a park bench"""
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=_a , image=_a , mask_image=_a , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="np" , )
__snake_case = output.images
__snake_case = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
__snake_case = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
__snake_case = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
__snake_case = """A red cat sitting on a park bench"""
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=_a , image=_a , mask_image=_a , guidance_scale=7.5 , num_inference_steps=20 , generator=_a , output_type="np" , )
__snake_case = output.images
__snake_case = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
__snake_case = []
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
__snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
a : int = [0, 2, 4, 6, 8]
a : Any = [1, 3, 5, 7, 9]
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict ) -> int:
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__snake_case = 0
for digit in range(10 ):
__snake_case = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , __UpperCAmelCase , __UpperCAmelCase )
return result
__snake_case = 0
for digita in range(10 ):
__snake_case = digita
if (remainder + digita) % 2 == 0:
__snake_case = ODD_DIGITS
else:
__snake_case = EVEN_DIGITS
for digita in other_parity_digits:
__snake_case = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , __UpperCAmelCase , __UpperCAmelCase , )
return result
def __UpperCAmelCase ( _UpperCAmelCase : Dict = 9 ) -> int:
__snake_case = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(__UpperCAmelCase , 0 , [0] * length , __UpperCAmelCase )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
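# For max_power = 9 (reversible numbers below 10**9), the documented answer to
# the underlying Project Euler problem 145 is 608720.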
| 713 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
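# The comparison relies on the monotonicity of the logarithm: base**exp is
# largest exactly when exp * log10(base) is largest. A tiny worked example:
# comparing 2**11 with 3**7, 11*log10(2) ~= 3.31 < 7*log10(3) ~= 3.34, so
# 3**7 (= 2187) beats 2**11 (= 2048).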
| 680 | 0 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
a : List[Any] = logging.getLogger(__name__)
a : str = '''pytorch_model.bin'''
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
__SCREAMING_SNAKE_CASE = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=_UpperCamelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=_UpperCamelCase , metadata={"""help""": """The name of the task to train on."""} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=_UpperCamelCase , metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=_UpperCamelCase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=_UpperCamelCase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=_UpperCamelCase , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
        default=100 , metadata={"""help""": """Maximum number of self-training iterations."""} , )
__SCREAMING_SNAKE_CASE = dataclasses.field(
default=_UpperCamelCase , metadata={"""help""": """Random seed for initialization."""} , )
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
__snake_case = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__snake_case = dataset.filter(lambda _UpperCAmelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__snake_case = int(eval_result * len(lowerCAmelCase__ ) )
print(lowerCAmelCase__ )
__snake_case = dataset.sort("probability" , reverse=lowerCAmelCase__ )
__snake_case = dataset.select(range(lowerCAmelCase__ ) )
__snake_case = dataset.remove_columns(["label", "probability"] )
__snake_case = dataset.rename_column("prediction" , "label" )
__snake_case = dataset.map(lambda _UpperCAmelCase : {"label": idalabel[example["label"]]} )
__snake_case = dataset.shuffle(seed=args.seed )
__snake_case = os.path.join(lowerCAmelCase__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(lowerCAmelCase__ , index=lowerCAmelCase__ )
else:
dataset.to_json(lowerCAmelCase__ )
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> int:
__snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__snake_case = STModelArguments(model_name_or_path=lowerCAmelCase__ )
__snake_case = STDataArguments(train_file=lowerCAmelCase__ , infer_file=lowerCAmelCase__ )
__snake_case = STTrainingArguments(output_dir=lowerCAmelCase__ )
__snake_case = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowerCAmelCase__ ).items():
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for key, value in kwargs.items():
if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Sanity checks
__snake_case = {}
__snake_case = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__snake_case = args.train_file
__snake_case = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__snake_case = args.eval_file
for key in data_files:
__snake_case = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
__snake_case = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
__snake_case = F'''{args.output_dir}/self-train_iter-{{}}'''.format
__snake_case = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
accelerator.wait_for_everyone()
__snake_case = None
__snake_case = None
__snake_case = 0
__snake_case = False
# Show the progress bar
__snake_case = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__snake_case = data_dir_format(lowerCAmelCase__ )
assert os.path.exists(lowerCAmelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__snake_case = os.path.join(lowerCAmelCase__ , "stage-1" )
__snake_case = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
arguments_dict.update({key: value} )
__snake_case = os.path.join(lowerCAmelCase__ , "best-checkpoint" , lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , lowerCAmelCase__ , lowerCAmelCase__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , lowerCAmelCase__ )
finetune(**lowerCAmelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase__ )
logger.info("Self-training job completed: iteration: %d, stage: 1." , lowerCAmelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__snake_case = os.path.join(lowerCAmelCase__ , "best-checkpoint" )
__snake_case = os.path.join(lowerCAmelCase__ , "stage-2" )
# Update arguments_dict
__snake_case = model_path
__snake_case = data_files["train"]
__snake_case = current_output_dir
__snake_case = os.path.join(lowerCAmelCase__ , "best-checkpoint" , lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , lowerCAmelCase__ , lowerCAmelCase__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , lowerCAmelCase__ )
finetune(**lowerCAmelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase__ )
logger.info("Self-training job completed: iteration: %d, stage: 2." , lowerCAmelCase__ )
__snake_case = iteration
__snake_case = data_dir_format(iteration + 1 )
__snake_case = AutoConfig.from_pretrained(os.path.join(lowerCAmelCase__ , "best-checkpoint" ) )
__snake_case = config.idalabel
__snake_case = os.path.join(lowerCAmelCase__ , "eval_results_best-checkpoint.json" )
__snake_case = os.path.join(lowerCAmelCase__ , "test_results_best-checkpoint.json" )
assert os.path.exists(lowerCAmelCase__ )
with open(lowerCAmelCase__ , "r" ) as f:
__snake_case = float(json.load(lowerCAmelCase__ )[args.eval_metric] )
__snake_case = os.path.join(lowerCAmelCase__ , "infer_output_best-checkpoint.csv" )
assert os.path.exists(lowerCAmelCase__ )
# Loading the dataset from local csv or json files.
__snake_case = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
__snake_case = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
shutil.copy(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowerCAmelCase__ ):
shutil.copy(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.wait_for_everyone()
__snake_case = os.path.join(lowerCAmelCase__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__snake_case = eval_result
if best_iteration is None:
__snake_case = new_iteration
__snake_case = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__snake_case = new_iteration
__snake_case = new_eval_result
__snake_case = 0
else:
if new_eval_result == best_eval_result:
__snake_case = new_iteration
__snake_case = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__snake_case = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , lowerCAmelCase__ )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowerCAmelCase__ , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowerCAmelCase__ , "eval_results_best-iteration.json" ) , )
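# A minimal call sketch: per the argument wiring above, the entry point takes
# model_name_or_path, train_file, infer_file and output_dir positionally (the
# upstream name `selftrain` is assumed here, as are the file names):
#
#     selftrain("bert-base-uncased", "train.csv", "infer.csv", "./output",
#               eval_file="eval.csv", evaluation_strategy="epoch")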
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : Optional[int] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , )
__snake_case = add_prefix_space
def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Any = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
__SCREAMING_SNAKE_CASE = """poolformer"""
def __init__( self : int , a_ : int=3 , a_ : int=16 , a_ : List[Any]=16 , a_ : Optional[int]=3 , a_ : Union[str, Any]=4.0 , a_ : Optional[Any]=[2, 2, 6, 2] , a_ : int=[64, 128, 320, 512] , a_ : Any=[7, 3, 3, 3] , a_ : List[str]=[4, 2, 2, 2] , a_ : List[Any]=[2, 1, 1, 1] , a_ : List[str]=4 , a_ : Dict=0.0 , a_ : Optional[int]="gelu" , a_ : List[Any]=True , a_ : str=1e-5 , a_ : List[str]=0.02 , **a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = num_channels
__snake_case = patch_size
__snake_case = stride
__snake_case = padding
__snake_case = pool_size
__snake_case = hidden_sizes
__snake_case = mlp_ratio
__snake_case = depths
__snake_case = patch_sizes
__snake_case = strides
__snake_case = num_encoder_blocks
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_layer_scale
__snake_case = layer_scale_init_value
__snake_case = initializer_range
super().__init__(**_UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def A ( self : Tuple ):
"""simple docstring"""
return 2e-3
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A__ ( _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> Union[str, Any]:
__snake_case = 0
if start < end:
__snake_case = randint(_lowerCamelCase , _lowerCamelCase )
__snake_case = a[end]
__snake_case = a[pivot]
__snake_case = temp
__snake_case = _in_place_partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
count += _in_place_quick_sort(_lowerCamelCase , _lowerCamelCase , p - 1 )
count += _in_place_quick_sort(_lowerCamelCase , p + 1 , _lowerCamelCase )
return count
def A__ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
__snake_case = 0
__snake_case = randint(_lowerCamelCase , _lowerCamelCase )
__snake_case = a[end]
__snake_case = a[pivot]
__snake_case = temp
__snake_case = start - 1
for index in range(_lowerCamelCase , _lowerCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__snake_case = new_pivot_index + 1
__snake_case = a[new_pivot_index]
__snake_case = a[index]
__snake_case = temp
__snake_case = a[new_pivot_index + 1]
__snake_case = a[end]
__snake_case = temp
return new_pivot_index + 1, count
a : List[Any] = TemporaryFile()
a : str = 100 # 100 elements are to be sorted
a , a : Optional[int] = 0, 1 # mean and standard deviation
a : Any = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a : Optional[Any] = np.load(outfile)
a : str = len(M) - 1
a : Dict = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution '''
    '''is :'''
)
print(z)
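# Because the identifiers above are mangled, here is a hedged, cleanly named sketch of
# the same technique: in-place quicksort with a random pivot that counts element
# comparisons (a reconstruction of the intended logic, not the original source):
from random import randint


def quick_sort_counting(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]  # move the pivot value to the end
        boundary = start - 1
        for index in range(start, end):
            count += 1  # one comparison per scanned element
            if a[index] < a[end]:
                boundary += 1
                a[boundary], a[index] = a[index], a[boundary]
        a[boundary + 1], a[end] = a[end], a[boundary + 1]
        p = boundary + 1
        count += quick_sort_counting(a, start, p - 1)
        count += quick_sort_counting(a, p + 1, end)
    return count


sample = [5, 1, 4, 2, 3]
comparisons = quick_sort_counting(sample, 0, len(sample) - 1)
print(sample, comparisons)  # [1, 2, 3, 4, 5] plus the comparison count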
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report, once in github format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g., 3 times via --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
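# Hedged usage sketch for the Tee pattern above: in main() the tool assigns an
# instance of this class to sys.stdout so every print() lands both on the console and
# in the report file (tqdm's carriage-return progress lines are stripped from the
# file copy). A minimal standalone version with illustrative names:
import sys


class SimpleTee:
    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def write(self, message):
        self.stdout.write(message)
        self.file.write(message)

    def __getattr__(self, attr):  # delegate flush(), isatty(), ... to the real stdout
        return getattr(self.stdout, attr)


sys.stdout = SimpleTee("benchmark-report.txt")  # filename is illustrative
print("this line goes to the screen and to benchmark-report.txt")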
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
__snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
            lambda r : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
    __snake_case = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
    __snake_case = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
    __snake_case = [list(map(str.strip , re.split(R"\|" , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
    __snake_case = max(len(x ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Union[str, Any] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
a : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
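# Hedged usage sketch of what these tests exercise: `parallel_backend("spark")` routes
# `map_nested` through joblib-spark instead of multiprocessing for the duration of the
# context. This assumes joblibspark and a Spark session are available, per the test's
# own decorators:
from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested


def add_one(i):  # must be picklable, like the helper in the tests above
    return i + 1


with parallel_backend("spark"):
    assert map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2) == {"a": [2, 3], "b": [4, 5]}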
| 680 | 0 |
'''simple docstring'''
import re
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> list:
return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
__snake_case = split_input(str_ )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : bool , _UpperCAmelCase : str ) -> str:
try:
__snake_case = split_input(a_ )
if upper:
__snake_case = ''''''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__snake_case = ''''''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
return to_simple_case(a_ )
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
try:
__snake_case = to_simple_case(a_ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : bool ) -> str:
return to_complex_case(a_ , a_ , "_" )
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : bool ) -> str:
return to_complex_case(a_ , a_ , "-" )
if __name__ == "__main__":
__import__('''doctest''').testmod()
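# The converters above split on non-alphanumeric characters, then re-join with a
# chosen separator and casing. Since the names in this document are mangled, here is
# a hedged sketch of the intended behavior with illustrative names:
import re


def to_snake_case(text: str) -> str:
    words = [w for w in re.split(r"[^a-zA-Z0-9]+", text.strip()) if w]
    return "_".join(w.lower() for w in words)


def to_camel_case(text: str) -> str:
    words = [w for w in re.split(r"[^a-zA-Z0-9]+", text.strip()) if w]
    if not words:
        return ""
    return words[0].lower() + "".join(w.capitalize() for w in words[1:])


print(to_snake_case("Hello, World!"))  # hello_world
print(to_camel_case("Hello, World!"))  # helloWorld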
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
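# Hedged usage sketch, assuming this mirrors the public MobileNetV2 API in
# `transformers` (which the checkpoint URLs above suggest). The ONNX config above
# marks only the batch axis as dynamic and validates exports with a 1e-4 tolerance:
from transformers import MobileNetV2Config, MobileNetV2Model

mobilenet_config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
mobilenet = MobileNetV2Model(mobilenet_config)  # randomly initialized weights
print(mobilenet_config.output_stride)  # 32, matching the default above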
| 680 | 0 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str , **_UpperCAmelCase : List[Any] ) -> str:
__snake_case = AutoConfig.from_pretrained(_A , **_A )
__snake_case = AutoModelForSeqaSeqLM.from_config(_A )
model.save_pretrained(_A )
AutoTokenizer.from_pretrained(_A ).save_pretrained(_A )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
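# Hedged CLI sketch: `fire.Fire` maps positional command-line arguments onto the
# function parameters, so the intended invocation (script/paths illustrative) is:
#
#   python save_randomly_initialized_version.py t5-small ./t5-small-random
#
# which downloads only the t5-small config, builds an untrained model from it, and
# saves the model plus tokenizer under ./t5-small-random.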
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
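# Hedged usage sketch, assuming this mirrors the public Data2VecText API in
# `transformers`. The ONNX config above declares dynamic batch/sequence (or
# batch/choice/sequence) axes for input_ids and attention_mask:
from transformers import Data2VecTextConfig, Data2VecTextModel

d2v_config = Data2VecTextConfig(hidden_size=768, num_hidden_layers=12)
d2v_model = Data2VecTextModel(d2v_config)  # randomly initialized weights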
| 680 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
__snake_case = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
__snake_case = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
__snake_case = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__snake_case = model(snake_case_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case_ , atol=1e-3 ) )
@slow
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
__snake_case = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
__snake_case = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
__snake_case = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__snake_case = model(snake_case_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case_ , atol=1e-3 ) )
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
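# The heart of PABEE above is patience-based early exit: at inference, every layer's
# classifier makes a prediction, and the forward pass stops once `patience`
# consecutive layers agree. A hedged, framework-free sketch of that control flow
# (the per-layer predictions are illustrative):
def patience_early_exit(per_layer_predictions, patience):
    patient_counter = 0
    previous = None
    layers_used = 0
    for layers_used, prediction in enumerate(per_layer_predictions, start=1):
        if previous is not None and prediction == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            break  # prediction is stable; skip the remaining layers
    return previous, layers_used


print(patience_early_exit(["cat", "dog", "dog", "dog", "dog"], patience=2))
# -> ('dog', 4): layer 5 was never evaluated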
| 680 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE = """"""
__SCREAMING_SNAKE_CASE = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__SCREAMING_SNAKE_CASE = None # compression type in fsspec. ex: "gzip"
__SCREAMING_SNAKE_CASE = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , a_ : Union[str, Any] = "" , a_ : Optional[Any] = None , a_ : List[str] = None , **a_ : Optional[int] ):
"""simple docstring"""
super().__init__(self , **_lowercase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__snake_case = fsspec.open(
_lowercase , mode="rb" , protocol=_lowercase , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__snake_case = os.path.basename(self.file.path.split("::" )[0] )
__snake_case = (
self.compressed_name[: self.compressed_name.rindex("." )]
if """.""" in self.compressed_name
else self.compressed_name
)
__snake_case = None
@classmethod
def A ( cls : List[str] , a_ : Tuple ):
"""simple docstring"""
return super()._strip_protocol(_lowercase ).lstrip("/" )
def A ( self : List[str] ):
"""simple docstring"""
if self.dir_cache is None:
__snake_case = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
__snake_case = {f["""name"""]: f}
def A ( self : Tuple , a_ : int ):
"""simple docstring"""
return self.file.open().read()
def A ( self : List[str] , a_ : Optional[Any] , a_ : Optional[int] = "rb" , a_ : int=None , a_ : Any=True , a_ : Union[str, Any]=None , **a_ : List[Any] , ):
"""simple docstring"""
__snake_case = self._strip_protocol(_lowercase )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE = """bz2"""
__SCREAMING_SNAKE_CASE = """bz2"""
__SCREAMING_SNAKE_CASE = """.bz2"""
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE = """gzip"""
__SCREAMING_SNAKE_CASE = """gzip"""
__SCREAMING_SNAKE_CASE = """.gz"""
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE = """lz4"""
__SCREAMING_SNAKE_CASE = """lz4"""
__SCREAMING_SNAKE_CASE = """.lz4"""
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE = """xz"""
__SCREAMING_SNAKE_CASE = """xz"""
__SCREAMING_SNAKE_CASE = """.xz"""
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE = """zstd"""
__SCREAMING_SNAKE_CASE = """zstd"""
__SCREAMING_SNAKE_CASE = """.zst"""
def __init__( self : str , a_ : Union[str, Any] , a_ : Dict = "rb" , a_ : Dict = None , a_ : List[Any] = None , a_ : str = DEFAULT_BLOCK_SIZE , **a_ : str , ):
"""simple docstring"""
super().__init__(
fo=_lowercase , mode=_lowercase , target_protocol=_lowercase , target_options=_lowercase , block_size=_lowercase , **_lowercase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__snake_case = self.file.__enter__
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = file_
def __enter__( self : List[Any] ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self : List[str] , *a_ : Union[str, Any] , **a_ : Optional[int] ):
"""simple docstring"""
self._file.__exit__(*_lowercase , **_lowercase )
def __iter__( self : Tuple ):
"""simple docstring"""
return iter(self._file )
def A ( self : str ):
"""simple docstring"""
return next(self._file )
def __getattr__( self : str , a_ : List[str] ):
"""simple docstring"""
return getattr(self._file , _lowercase )
def fixed_enter(*a_ : Union[str, Any] , **a_ : List[str] ):
return WrappedFile(_enter(*_lowercase , **_lowercase ) )
__snake_case = fixed_enter
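# Hedged usage sketch: with compression filesystems like the ones above registered,
# fsspec can read a compressed file transparently; stock fsspec exposes the same
# behavior through its `compression=` argument (the file name is illustrative):
import gzip

import fsspec

with open("sample.txt.gz", "wb") as f:
    f.write(gzip.compress(b"hello compression filesystems"))

with fsspec.open("sample.txt.gz", mode="rt", compression="gzip") as f:
    print(f.read())  # -> hello compression filesystems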
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
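# Hedged inference sketch matching the integration test above, with the same public
# checkpoint (assumes transformers, torch, vision extras, and network access):
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

dpt_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
dpt_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # any RGB image works
inputs = dpt_processor(images=image, return_tensors="pt")
with torch.no_grad():
    depth = dpt_model(**inputs).predicted_depth  # shape (1, 384, 384) for this checkpoint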
| 680 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : int , a_ : Tuple , a_ : str=7 , a_ : Tuple=3 , a_ : List[Any]=18 , a_ : List[str]=30 , a_ : Optional[Any]=400 , a_ : Any=True , a_ : Optional[Any]=None , a_ : List[str]=True , ):
"""simple docstring"""
__snake_case = size if size is not None else {"height": 18, "width": 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_normalize
def A ( self : List[str] ):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _lowercase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = ImageGPTImageProcessor if is_vision_available() else None
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = ImageGPTImageProcessingTester(self )
@property
def A ( self : List[str] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , "clusters" ) )
self.assertTrue(hasattr(A_ , "do_resize" ) )
self.assertTrue(hasattr(A_ , "size" ) )
self.assertTrue(hasattr(A_ , "do_normalize" ) )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
__snake_case = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ , obj[key] ) )
else:
self.assertEqual(obj[key] , A_ )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = os.path.join(A_ , "image_processor.json" )
image_processor_first.to_json_file(A_ )
__snake_case = self.image_processing_class.from_json_file(A_ ).to_dict()
__snake_case = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , A_ )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(A_ )
__snake_case = self.image_processing_class.from_pretrained(A_ ).to_dict()
__snake_case = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , A_ )
@unittest.skip("ImageGPT requires clusters at initialization" )
def A ( self : Optional[Any] ):
"""simple docstring"""
pass
def __UpperCAmelCase ( ) -> int:
__snake_case = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
__snake_case = Image.open(dataset[4]["file"] )
__snake_case = Image.open(dataset[5]["file"] )
__snake_case = [imagea, imagea]
return images
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : str ):
"""simple docstring"""
__snake_case = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
__snake_case = prepare_images()
# test non-batched
__snake_case = image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
__snake_case = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , A_ )
# test batched
__snake_case = image_processing(A_ , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
__snake_case = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , A_ )
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 680 | 0 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
a : str = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
a : int = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = CamembertTokenizer
__SCREAMING_SNAKE_CASE = CamembertTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
def A ( self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case = CamembertTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : int ):
"""simple docstring"""
__snake_case = "<pad>"
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 1_004 )
def A ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def A ( self : List[str] ):
"""simple docstring"""
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
def A ( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def A ( self : Any ):
"""simple docstring"""
__snake_case = {"input_ids": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=sequences , )
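# Hedged usage sketch (not part of the original tests), assuming Hub access to
# the released checkpoint:
#
#   tok = CamembertTokenizer.from_pretrained("camembert-base")
#   pieces = tok.tokenize("J'aime le camembert !")  # SentencePiece sub-word pieces
#   ids = tok.encode("J'aime le camembert !")       # adds <s> ... </s> special tokens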
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def tearDown( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__( self : List[str] , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , num_vector_embeds : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , ):
        """simple docstring"""
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self : Optional[int] , hidden_states : Any , encoder_hidden_states : Any , timestep : Optional[int]=None , attention_mask : Optional[Any]=None , cross_attention_kwargs : Optional[dict]=None , return_dict : bool = True , ):
        """simple docstring"""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
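# Hedged usage sketch (not part of the original module): a pipeline can
# rebalance the two condition streams before calling the module, e.g.
#
#   model.mix_ratio = 0.7  # weight the first transformer's output more heavily
#   out = model(hidden_states, encoder_hidden_states, timestep=t, return_dict=False)[0]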
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            output_model_file = os.path.join(output_dir , weights_name )
            if accelerator.process_index == 0:
                logger.info(F'''Saving model to {output_model_file}''' )
                torch.save(state_dict , output_model_file )
                logger.info(F'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(F'''Saving model to {output_model_file}''' )
            torch.save(state_dict , output_model_file )
            logger.info(F'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , F'''{MODEL_NAME}_{model_index}''' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(F'''Saving model to {ckpt_dir}''' )
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(F'''Model saved to {ckpt_dir}''' )
def load_fsdp_model(fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object" )
                return
            weights_name = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(F'''Loading model from {input_model_file}''' )
            state_dict = torch.load(input_model_file )
            logger.info(F'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(F'''Loading model from {input_model_file}''' )
            state_dict = torch.load(input_model_file )
            logger.info(F'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , F'''{MODEL_NAME}_{model_index}''' )
                if F'''{MODEL_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading model from {ckpt_dir}''' )
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict["model"]
            logger.info(F'''Model loaded from {ckpt_dir}''' )
        model.load_state_dict(state_dict )
def save_fsdp_optimizer(fsdp_plugin , accelerator , model , optimizer , output_dir , optimizer_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name )
                logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
                torch.save(optim_state , output_optimizer_file )
                logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
        else:
            ckpt_dir = os.path.join(output_dir , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def load_fsdp_optimizer(fsdp_plugin , accelerator , model , optimizer , input_dir , optimizer_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
            optim_state = torch.load(input_optimizer_file )
            logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
        else:
            ckpt_dir = (
                os.path.join(input_dir , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
                if F'''{OPTIMIZER_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state["optimizer"]
            logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
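# Added note (not part of the original module): in an Accelerate training
# script these helpers are normally reached indirectly, e.g. via
# `accelerator.save_state(output_dir)` and `accelerator.load_state(input_dir)`,
# rather than being called by hand.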
| 680 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__( self : Optional[Any] , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : Union[str, Any] ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Optional[Any] ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model( self : Any , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm( self : Optional[Any] , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering( self : List[str] , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification( self : int , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_token_classification( self : Tuple , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice( self : List[Any] , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self : str ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
        else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp( self : Tuple ):
        """simple docstring"""
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def A ( self : Any ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def A ( self : int ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def A ( self : List[str] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def A ( self : str ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def A ( self : Union[str, Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    def A ( self : str ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
def A ( self : Dict ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DistilBertModel.from_pretrained("distilbert-base-uncased" )
__snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case = model(__A , attention_mask=__A )[0]
__snake_case = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
__snake_case = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
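# Hedged usage sketch (not part of the original tests): the integration check
# above corresponds roughly to this eager-mode usage.
#
#   model = DistilBertModel.from_pretrained("distilbert-base-uncased")
#   hidden = model(input_ids, attention_mask=attention_mask).last_hidden_state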
| 703 |
'''simple docstring'''
def fizz_buzz(number: int , iterations: int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
            out += str(number )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
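    # Hypothetical extra check (not in the original module), using the function
    # as renamed above: the classic first fifteen FizzBuzz values.
    assert fizz_buzz(1 , 15 ) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "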
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : Tuple = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
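# Added note (not part of the original module): `_LazyModule` defers the heavy
# submodule imports declared in `_import_structure` until an attribute is first
# accessed, which keeps `import transformers` fast.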
| 704 |
'''simple docstring'''
def twos_complement(number: int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
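    # Hypothetical demo (not in the original module), using the function as
    # renamed above: -5 in a 4-bit two's complement representation.
    print(twos_complement(-5 ))  # 0b1011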
| 680 | 0 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors , noofclusters ):
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64" , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float" , [dim] )
        vb = tf.placeholder("float" , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float" , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 1_00
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
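# Hedged usage sketch (not part of the original module): requires a TF1-style
# runtime (under TensorFlow 2.x, import tensorflow.compat.v1 as tf and call
# tf.disable_eager_execution() first).
#
#   points = [array([1.0, 1.0]), array([1.2, 0.8]), array([8.0, 8.1]), array([7.9, 8.2])]
#   centroids, assignments = tf_k_means_cluster(points, 2)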
| 705 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
while number:
number &= number - 1
result += 1
return result
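# Added note: `number &= number - 1` clears the lowest set bit on every pass,
# so the loop above runs once per set bit (Brian Kernighan's trick) rather than
# once per bit position.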
def get_set_bits_count_using_modulo_operator(number: int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def benchmark() -> None:
    def do_benchmark(number: int ) -> None:
        setup = "import __main__ as z"
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
        print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = '''
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class ConvertCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
"""simple docstring"""
        train_parser = parser.add_parser(
            "convert" , help="CLI tool to convert model checkpoints from the original authors' format to a Transformers PyTorch checkpoint." , )
        train_parser.add_argument("--model_type" , type=str , required=True , help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint" , type=str , required=True , help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output" , type=str , required=True , help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config" , type=str , default="" , help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name" , type=str , default=None , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self : Any , model_type : str , tf_checkpoint : str , pytorch_dump_output : str , config : str , finetuning_task_name : str , *args : int , ):
        """simple docstring"""
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self : Union[str, Any] ):
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
__snake_case = self._tf_checkpoint
__snake_case = """"""
else:
__snake_case = self._tf_checkpoint
__snake_case = """"""
convert_transfo_xl_checkpoint_to_pytorch(
a_ , self._config , self._pytorch_dump_output , a_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
    def teacher_config( self : Union[str, Any] ):
        """simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART )
def A ( self : str ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
def A ( self : Dict ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
        with self.assertRaises(AttributeError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 680 | 0 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : Tuple = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name: str ):
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported." )
    matches = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name: str , checkpoint_path: str , pytorch_dump_folder_path: str , push_to_hub: bool=False ):
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
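# Hedged usage sketch (not part of the original script), assuming this file is
# saved as convert_mobilenet_v1.py:
#
#   python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet-out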
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
    mode = "sequence-classification"
    def __init__( self : List[str] , hparams : Any ):
        """simple docstring"""
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser : Any , root_dir : str ):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            "--max_seq_length" , default=128 , type=int , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--task" , default="" , type=str , required=True , help="The GLUE task to run" , )
        parser.add_argument(
            "--gpus" , default=0 , type=int , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
def main() -> None:
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
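# Hedged usage sketch (not part of the original script), assuming the standard
# lightning_base arguments and that this file is saved as run_pl_glue.py:
#
#   python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
#       --data_dir $GLUE_DIR/MRPC --do_train --do_predict --output_dir ./mrpc-out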
| 680 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a : int = logging.get_logger(__name__)
a : Optional[Any] = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE = """gptj"""
__SCREAMING_SNAKE_CASE = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self : int , vocab_size : int=50_400 , n_positions : int=2_048 , n_embd : int=4_096 , n_layer : int=28 , n_head : int=16 , rotary_dim : int=64 , n_inner : Optional[int]=None , activation_function : str="gelu_new" , resid_pdrop : float=0.0 , embd_pdrop : float=0.0 , attn_pdrop : float=0.0 , layer_norm_epsilon : float=1e-5 , initializer_range : float=0.02 , use_cache : bool=True , bos_token_id : int=50_256 , eos_token_id : int=50_256 , tie_word_embeddings : bool=False , **kwargs : str , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class SCREAMING_SNAKE_CASE__ ( OnnxConfigWithPast ):
    def __init__( self : Union[str, Any] , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        """simple docstring"""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self : int ):
        """simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
@property
    def num_layers( self : Optional[Any] ):
"""simple docstring"""
return self._config.n_layer
@property
    def num_attention_heads( self : Dict ):
"""simple docstring"""
return self._config.n_head
    def generate_dummy_inputs( self : Dict , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self : Union[str, Any] ):
"""simple docstring"""
return 13
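# Hedged usage sketch (not part of the original module): the ONNX config is
# normally consumed by `transformers.onnx.export`, roughly:
#
#   config = GPTJConfig.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = the OnnxConfigWithPast subclass defined above, e.g. Cls(config)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)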
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
__snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__snake_case = dataset_size < in_memory_max_size
else:
__snake_case = False
__snake_case = is_small_dataset(_UpperCAmelCase )
assert result == expected
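# Editorial reference sketch (not part of the test module): the behaviour the
# parametrized test above exercises, written out directly. A dataset counts as
# "small" only when its size is known and the in-memory ceiling is non-zero.
def _is_small_dataset_reference(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False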
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> list:
if len(_lowercase ) == 0:
return []
__snake_case = min(_lowercase ), max(_lowercase )
__snake_case = int(max_value - min_value ) + 1
__snake_case = [[] for _ in range(_lowercase )]
for i in my_list:
buckets[int(i - min_value )].append(_lowercase )
return [v for bucket in buckets for v in sorted(_lowercase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
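    # Editorial addition: float inputs also sort correctly, since bucket indices
    # are truncated with int(). (Assumes the obfuscated placeholder names above
    # are reconciled with the names they are read as.)
    assert bucket_sort([0.4, 1.2, 0.1, -0.5]) == [-0.5, 0.1, 0.4, 1.2]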
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
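    # Editorial usage sketch, assuming the two functions above are restored to
    # their original names dodecahedron_surface_area and dodecahedron_volume:
    #     dodecahedron_surface_area(5)  # ~516.14 = 3 * sqrt(25 + 10 * sqrt(5)) * 5**2
    #     dodecahedron_volume(5)        # ~957.89 = (15 + 7 * sqrt(5)) / 4 * 5**3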
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> int:
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , ) -> str:
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , ) -> int:
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return compound_interest(
UpperCAmelCase__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
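    # Editorial usage sketch, assuming the functions above keep their original
    # names simple_interest / compound_interest and a (principal, rate, periods)
    # parameter order:
    #     simple_interest(10_000, 0.0005, 100)  # 500.0
    #     compound_interest(10_000, 0.05, 3)    # 1576.25 = 10_000 * (1.05 ** 3 - 1)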
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
    __snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
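    # Editorial usage sketch, assuming the function above is restored to its
    # original name lamberts_ellipsoidal_distance(lat1, lon1, lat2, lon2), with
    # coordinates in degrees and the result in metres:
    #     lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)
    #     # San Francisco -> New York: roughly 4.14e6 m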
| 680 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = "ylacombe/bark-small"
__snake_case = tempfile.mkdtemp()
__snake_case = "en_speaker_1"
__snake_case = "This is a test string"
__snake_case = "speaker_embeddings_path.json"
__snake_case = "speaker_embeddings"
def A ( self : List[str] , **a_ : Dict ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def A ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
__snake_case = BarkProcessor(tokenizer=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__snake_case = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__snake_case = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def A ( self : Any ):
"""simple docstring"""
__snake_case = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__snake_case = 35
__snake_case = 2
__snake_case = 8
__snake_case = {
"semantic_prompt": np.ones(UpperCAmelCase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__snake_case = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
__snake_case = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__snake_case = os.path.join(self.tmpdirname , "file.npz" )
np.savez(UpperCAmelCase_ , **UpperCAmelCase_ )
__snake_case = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
__snake_case = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__snake_case = processor(text=self.input_string , voice_preset=self.voice_preset )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
__snake_case = BarkProcessor(tokenizer=UpperCAmelCase_ )
__snake_case = processor(text=self.input_string )
__snake_case = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
    # Create a Gaussian kernel of the given size.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__snake_case = int(args[4] )
__snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
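    # Editorial usage sketch (no OpenCV window needed), assuming the
    # __snake_case placeholders above are restored to the names they are read as:
    #     noisy = np.random.rand(32, 32).astype(np.float32)
    #     smoothed = bilateral_filter(noisy, 1.0, 1.0, 5)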
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Tuple , a_ : int ):
"""simple docstring"""
__snake_case = data
__snake_case = None
__snake_case = None
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Tuple: # In-order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Any:
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def __UpperCAmelCase ( ) -> str: # Main function for testing.
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(_lowercase ) )
print(depth_of_tree(_lowercase ) )
print("Tree is: " )
display(_lowercase )
if __name__ == "__main__":
main()
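    # Editorial addition: a quick negative case. A node with exactly one child
    # makes the tree not full:
    #     lone = Node(1); lone.left = Node(2)
    #     is_full_binary_tree(lone)  # False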
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
__snake_case = []
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
__snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
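    # Editorial usage sketch, assuming the helpers above keep their original
    # names build_menu / greedy and that Things keeps its original class name:
    #     foods = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 60], [40, 60, 20])
    #     greedy(foods, 500, Things.get_value)  # everything fits: total value 240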
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(__UpperCamelCase ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(__UpperCamelCase ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
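    # Editorial worked example: whatever the function's original name, for
    # [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest right/down path is
    # 1 -> 3 -> 1 -> 1 -> 1, so it returns 7.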
| 713 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
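    # Editorial note: the logarithm trick above compares b**e values without
    # computing them, since e * log10(b) preserves ordering. For example,
    # 2**11 vs 3**7: 11 * log10(2) ~ 3.31 < 7 * log10(3) ~ 3.34,
    # so 3**7 (2187) beats 2**11 (2048).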
| 680 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def A ( *a_ : Tuple , **a_ : Optional[int] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A ( self : int , a_ : str , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        __snake_case = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
return object_detector, examples
def A ( self : str , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = object_detector(examples[0] , threshold=0.0 )
__snake_case = len(a_ )
self.assertGreater(a_ , 0 )
self.assertEqual(
a_ , [
{
"score": ANY(a_ ),
"label": ANY(a_ ),
"box": {"xmin": ANY(a_ ), "ymin": ANY(a_ ), "xmax": ANY(a_ ), "ymax": ANY(a_ )},
}
for i in range(a_ )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A ( self : List[Any] ):
"""simple docstring"""
pass
@require_torch
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
__snake_case = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
__snake_case = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = pipeline("zero-shot-object-detection" )
__snake_case = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
__snake_case = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A ( self : Union[str, Any] ):
"""simple docstring"""
pass
@require_torch
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = 0.2
__snake_case = pipeline("zero-shot-object-detection" )
__snake_case = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=a_ , )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def A ( self : int ):
"""simple docstring"""
__snake_case = 2
__snake_case = pipeline("zero-shot-object-detection" )
__snake_case = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=a_ , )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : Optional[int] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , )
__snake_case = add_prefix_space
def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
a : Optional[int] = random.Random()
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str]=1.0 , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None ) -> List[Any]:
if rng is None:
__snake_case = global_rng
__snake_case = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : Any , a_ : List[str] , a_ : Optional[int]=7 , a_ : Dict=400 , a_ : Dict=2_000 , a_ : List[Any]=10 , a_ : Optional[int]=160 , a_ : Tuple=8 , a_ : Tuple=0.0 , a_ : Optional[Any]=4_000 , a_ : str=False , a_ : Dict=True , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = min_seq_length
__snake_case = max_seq_length
__snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case = padding_value
__snake_case = sampling_rate
__snake_case = return_attention_mask
__snake_case = do_normalize
__snake_case = feature_size
__snake_case = chunk_length
__snake_case = hop_length
def A ( self : Optional[int] ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def A ( self : Optional[int] , a_ : Optional[int]=False , a_ : Union[str, Any]=False ):
"""simple docstring"""
def _flatten(a_ : Any ):
            return list(itertools.chain(*a_ ) )
if equal_length:
__snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case = [np.asarray(__lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = WhisperFeatureExtractor if is_speech_available() else None
def A ( self : int ):
"""simple docstring"""
__snake_case = WhisperFeatureExtractionTester(self )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = feat_extract_first.save_pretrained(__lowercase )[0]
check_json_file_has_correct_format(__lowercase )
__snake_case = self.feature_extraction_class.from_pretrained(__lowercase )
__snake_case = feat_extract_first.to_dict()
__snake_case = feat_extract_second.to_dict()
__snake_case = feat_extract_first.mel_filters
__snake_case = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertEqual(__lowercase , __lowercase )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = os.path.join(__lowercase , "feat_extract.json" )
feat_extract_first.to_json_file(__lowercase )
__snake_case = self.feature_extraction_class.from_json_file(__lowercase )
__snake_case = feat_extract_first.to_dict()
__snake_case = feat_extract_second.to_dict()
__snake_case = feat_extract_first.mel_filters
__snake_case = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertEqual(__lowercase , __lowercase )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__snake_case = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case = [np.asarray(__lowercase ) for speech_input in speech_inputs]
# Test feature size
__snake_case = feature_extractor(__lowercase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__snake_case = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
__snake_case = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# Test batched
__snake_case = feature_extractor(__lowercase , return_tensors="np" ).input_features
__snake_case = feature_extractor(__lowercase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ):
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case = np.asarray(__lowercase )
__snake_case = feature_extractor(__lowercase , return_tensors="np" ).input_features
__snake_case = feature_extractor(__lowercase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ):
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# Test truncation required
__snake_case = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__snake_case = [np.asarray(__lowercase ) for speech_input in speech_inputs]
__snake_case = [x[: feature_extractor.n_samples] for x in speech_inputs]
__snake_case = [np.asarray(__lowercase ) for speech_input in speech_inputs_truncated]
__snake_case = feature_extractor(__lowercase , return_tensors="np" ).input_features
__snake_case = feature_extractor(__lowercase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ):
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
def A ( self : Dict ):
"""simple docstring"""
import torch
__snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case = np.random.rand(100 , 32 ).astype(np.floataa )
__snake_case = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__snake_case = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__snake_case = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def A ( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__snake_case = ds.sort("id" ).select(range(__lowercase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def A ( self : Optional[int] ):
"""simple docstring"""
        # fmt: off
        __snake_case = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__snake_case = self._load_datasamples(1 )
__snake_case = WhisperFeatureExtractor()
__snake_case = feature_extractor(__lowercase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __lowercase , atol=1e-4 ) )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case = self._load_datasamples(1 )[0]
__snake_case = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
__snake_case = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__lowercase )[0]
self.assertTrue(np.all(np.mean(__lowercase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowercase ) - 1 ) < 1e-3 ) )
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = StableUnCLIPPipeline
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__SCREAMING_SNAKE_CASE = False
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = 32
__snake_case = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=A_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__snake_case = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=A_ , num_layers=1 , )
torch.manual_seed(0 )
__snake_case = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=A_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__snake_case = StableUnCLIPImageNormalizer(embedding_dim=A_ )
__snake_case = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A_ , layers_per_block=1 , upcast_attention=A_ , use_linear_projection=A_ , )
torch.manual_seed(0 )
__snake_case = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=A_ , steps_offset=1 , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL()
__snake_case = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def A ( self : int , a_ : List[str] , a_ : List[str]=0 ):
"""simple docstring"""
if str(A_ ).startswith("mps" ):
__snake_case = torch.manual_seed(A_ )
else:
__snake_case = torch.Generator(device=A_ ).manual_seed(A_ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=A_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=A_ )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__snake_case = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = torch.Generator(device="cpu" ).manual_seed(0 )
        __snake_case = pipe("anime turtle" , generator=A_ , output_type="np" )
__snake_case = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
__snake_case = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 times using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
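#
# Editorial sketch (not part of the original tool): how the --variations
# dimensions expand into the run matrix described above.
#
# import itertools
# dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
# runs = [" ".join(parts).strip() for parts in itertools.product(*dims)]
# # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
# #  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#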
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
__snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
        lambda r : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
    __snake_case = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
    __snake_case = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
__snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
import argparse
import struct
import unittest
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , a_ : str ):
"""simple docstring"""
__snake_case = data
# Initialize hash values
__snake_case = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
__snake_case = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
__snake_case = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def A ( a_ : Dict ):
"""simple docstring"""
__snake_case = B"\x80" + (B"\x00" * (63 - (len(a_ ) + 8) % 64))
__snake_case = struct.pack(">Q" , (len(a_ ) * 8) )
return data + padding + big_endian_integer
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__snake_case = list(struct.unpack(">16L" , a_ ) )
# extend with 48 zeroed integers to fill out the 64-word message schedule
words += [0] * 48
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = self.hashes
for index in range(0 , 64 ):
if index > 15:
# fill in the zeroed entries at the end of the message schedule
__snake_case = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__snake_case = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__snake_case = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
__snake_case = self.ror(a_ , 6 ) ^ self.ror(a_ , 11 ) ^ self.ror(a_ , 25 )
__snake_case = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
__snake_case = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
__snake_case = self.ror(a_ , 2 ) ^ self.ror(a_ , 13 ) ^ self.ror(a_ , 22 )
__snake_case = (a & b) ^ (a & c) ^ (b & c)
__snake_case = (sa + maj) % 0x1_00_00_00_00
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
__snake_case = [a, b, c, d, e, f, g, h]
# Modify final values
__snake_case = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
__snake_case = "".join([hex(a_ )[2:].zfill(8 ) for value in self.hashes] )
def A ( self : List[str] , a_ : Tuple , a_ : Union[str, Any] ):
"""simple docstring"""
return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
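# A minimal verification sketch (hypothetical usage, mirroring the unit test below;
# names in this file are obfuscated, and the test refers to the class as SHAaaa):
#
#     import hashlib
#     assert SHAaaa(b"abc").hash == hashlib.sha256(b"abc").hexdigest()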
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
import hashlib
__snake_case = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(a_ ).hash , hashlib.shaaaa(a_ ).hexdigest() )
def __UpperCAmelCase ( ) -> List[str]:
import doctest
doctest.testmod()
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
__snake_case = parser.parse_args()
__snake_case = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
__snake_case = f.read()
else:
__snake_case = bytes(_UpperCAmelCase , "utf-8" )
print(SHAaaa(_UpperCAmelCase ).hash )
if __name__ == "__main__":
main()
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
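# A sketch of the mapping semantics exercised above (assumed behaviour, not part of
# the original test): map_nested applies the function to every leaf while keeping
# the container shape, e.g.
#
#     with parallel_backend("spark"):
#         map_nested(lambda i: i + 1, {"a": [1, 2]}, num_proc=2)  # -> {"a": [2, 3]}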
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , a_ : Optional[int] , a_ : Optional[int]=12 , a_ : Union[str, Any]=7 , a_ : List[Any]=True , a_ : Optional[int]=True , a_ : Dict=True , a_ : Dict=99 , a_ : str=32 , a_ : List[str]=32 , a_ : Dict=2 , a_ : int=4 , a_ : Any=37 , a_ : Optional[Any]=0.1 , a_ : Any=0.1 , a_ : Tuple=512 , a_ : List[Any]=0.02 , a_ : List[Any]=0 , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = projection_dim
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = max_position_embeddings
__snake_case = initializer_range
__snake_case = scope
__snake_case = bos_token_id
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__snake_case = input_mask.numpy()
__snake_case = input_mask.shape
__snake_case = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__snake_case = 1
__snake_case = 0
__snake_case = self.get_config()
return config, input_ids, tf.convert_to_tensor(lowerCamelCase__ )
def A ( self : str ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def A ( self : Any , a_ : int , a_ : str , a_ : Tuple ):
"""simple docstring"""
__snake_case = TFBlipTextModel(config=lowerCamelCase__ )
__snake_case = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , training=lowerCamelCase__ )
__snake_case = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case = config_and_inputs
__snake_case = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (TFBlipTextModel,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : str ):
"""simple docstring"""
__snake_case = BlipTextModelTester(self )
__snake_case = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
pass
def A ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def A ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def A ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def A ( self : Union[str, Any] ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TFBlipTextModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( self : str , a_ : Tuple=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCamelCase__ )
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
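# A usage sketch (hypothetical names mirroring the upstream transformers API; the
# classes in this file are obfuscated):
#
#     config = MobileNetV2Config(depth_multiplier=1.0)
#     onnx_config = MobileNetV2OnnxConfig(config)
#     list(onnx_config.inputs)         # -> ["pixel_values"]
#     onnx_config.atol_for_validation  # -> 1e-4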
| 680 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : int = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( __a ):
__SCREAMING_SNAKE_CASE = "switch_transformers"
__SCREAMING_SNAKE_CASE = ["past_key_values"]
__SCREAMING_SNAKE_CASE = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Tuple , a_ : Optional[Any]=32_128 , a_ : Any=768 , a_ : Union[str, Any]=64 , a_ : List[Any]=2_048 , a_ : Optional[int]=64 , a_ : Any=12 , a_ : Union[str, Any]=3 , a_ : Tuple=12 , a_ : Union[str, Any]=3 , a_ : Union[str, Any]=12 , a_ : List[str]=8 , a_ : int=False , a_ : Tuple=0.01 , a_ : Optional[int]="float32" , a_ : Tuple=False , a_ : Optional[int]=32 , a_ : List[str]=128 , a_ : List[Any]=0.1 , a_ : Union[str, Any]=1e-6 , a_ : str=0.001 , a_ : Dict=0.001 , a_ : Union[str, Any]=1.0 , a_ : Any="relu" , a_ : Any=True , a_ : Tuple=False , a_ : int=True , a_ : Optional[Any]=0 , a_ : List[str]=1 , **a_ : str , ):
"""simple docstring"""
__snake_case = vocab_size
__snake_case = d_model
__snake_case = d_kv
__snake_case = d_ff
__snake_case = num_sparse_encoder_layers
__snake_case = num_layers
__snake_case = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__snake_case = num_sparse_decoder_layers
# This tells us how many encoder layers apart each sparse layer is placed.
if self.num_sparse_encoder_layers > 0:
__snake_case = self.num_layers // self.num_sparse_encoder_layers
else:
__snake_case = self.num_layers # HACK: this will create 0 sparse layers
# This tells us how many decoder layers apart each sparse layer is placed.
if self.num_sparse_decoder_layers > 0:
__snake_case = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__snake_case = self.num_decoder_layers # HACK: this will create 0 sparse layers
__snake_case = num_heads
__snake_case = num_experts
__snake_case = expert_capacity
__snake_case = router_bias
__snake_case = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__snake_case = router_dtype
__snake_case = router_ignore_padding_tokens
__snake_case = relative_attention_num_buckets
__snake_case = relative_attention_max_distance
__snake_case = dropout_rate
__snake_case = layer_norm_epsilon
__snake_case = initializer_factor
__snake_case = feed_forward_proj
__snake_case = use_cache
__snake_case = add_router_probs
__snake_case = router_z_loss_coef
__snake_case = router_aux_loss_coef
__snake_case = self.feed_forward_proj.split("-" )
__snake_case = act_info[-1]
__snake_case = act_info[0] == "gated"
if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__snake_case = "gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
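# Worked example of the sparse-step arithmetic in __init__ above (an illustration,
# not from the original file): with num_layers=12 and num_sparse_encoder_layers=3,
# every 12 // 3 = 4th encoder layer is a sparse (MoE) layer; with
# num_sparse_encoder_layers=0 the step falls back to num_layers, which, as the
# inline HACK comments note, effectively creates zero sparse layers.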
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
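# A sketch of the exported axes (assumed behaviour matching the property above):
# for the "multiple-choice" task every input is dynamic over
# (batch, choice, sequence); for any other task it is dynamic over
# (batch, sequence), e.g.
#
#     {"input_ids": {0: "batch", 1: "sequence"},
#      "attention_mask": {0: "batch", 1: "sequence"}}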
| 680 | 0 |
from collections.abc import Sequence
from queue import Queue
class SCREAMING_SNAKE_CASE__ :
def __init__( self : int , a_ : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple=None , a_ : str=None ):
"""simple docstring"""
__snake_case = start
__snake_case = end
__snake_case = val
__snake_case = (start + end) // 2
__snake_case = left
__snake_case = right
def __repr__( self : List[Any] ):
"""simple docstring"""
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : int , a_ : Sequence , a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = collection
__snake_case = function
if self.collection:
__snake_case = self._build_tree(0 , len(__a ) - 1 )
def A ( self : List[Any] , a_ : Tuple , a_ : Union[str, Any] ):
"""simple docstring"""
self._update_tree(self.root , __a , __a )
def A ( self : Any , a_ : List[str] , a_ : Optional[int] ):
"""simple docstring"""
return self._query_range(self.root , __a , __a )
def A ( self : List[Any] , a_ : str , a_ : Dict ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(__a , __a , self.collection[start] )
__snake_case = (start + end) // 2
__snake_case = self._build_tree(__a , __a )
__snake_case = self._build_tree(mid + 1 , __a )
return SegmentTreeNode(__a , __a , self.fn(left.val , right.val ) , __a , __a )
def A ( self : Union[str, Any] , a_ : Dict , a_ : str , a_ : Optional[Any] ):
"""simple docstring"""
if node.start == i and node.end == i:
__snake_case = val
return
if i <= node.mid:
self._update_tree(node.left , __a , __a )
else:
self._update_tree(node.right , __a , __a )
__snake_case = self.fn(node.left.val , node.right.val )
def A ( self : Tuple , a_ : Dict , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , __a , __a )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , __a , node.mid ) , self._query_range(node.right , node.mid + 1 , __a ) , )
else:
# range in right child tree
return self._query_range(node.right , __a , __a )
def A ( self : List[Any] ):
"""simple docstring"""
if self.root is not None:
__snake_case = Queue()
queue.put(self.root )
while not queue.empty():
__snake_case = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
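# Worked example of the range split in _query_range above (an illustration, not
# part of the original file): for the demo array [2, 1, 5, 3, 4] with
# fn=operator.add, query_range(1, 3) reaches the root covering [0, 4] with mid=2,
# splits into [1, 2] (left child) and [3, 3] (right child), and combines
# (1 + 5) + 3 = 9; after update(1, 5) below the same query returns (5 + 5) + 3 = 13.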
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
a : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
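# A sketch of the patience mechanism above (an illustration, not from the original
# file): with patience=3 at inference time, the model classifies after every layer
# and stops as soon as three consecutive intermediate classifiers agree on the
# argmax prediction (or, for regression, stay within regression_threshold of each
# other); the layers skipped this way are what the speed-up statistic printed by
# the stats method measures.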
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
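# Worked example of the weighted training loss above (an illustration, not from the
# original file): the classifier after layer i contributes its loss with weight
# i + 1, so with three layers and per-layer losses l1, l2, l3 the total is
# (1*l1 + 2*l2 + 3*l3) / (1 + 2 + 3), favouring the deeper, more reliable exits.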
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : str = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
a : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 680 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : List[str] = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( __a ):
__SCREAMING_SNAKE_CASE = """luke"""
def __init__( self : Tuple , a_ : Optional[Any]=50_267 , a_ : Any=500_000 , a_ : List[str]=768 , a_ : str=256 , a_ : Optional[Any]=12 , a_ : Union[str, Any]=12 , a_ : Dict=3_072 , a_ : int="gelu" , a_ : Any=0.1 , a_ : Optional[Any]=0.1 , a_ : List[Any]=512 , a_ : Optional[int]=2 , a_ : Union[str, Any]=0.02 , a_ : int=1e-12 , a_ : Optional[int]=True , a_ : Optional[Any]=None , a_ : str=1 , a_ : Union[str, Any]=0 , a_ : Optional[int]=2 , **a_ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
__snake_case = vocab_size
__snake_case = entity_vocab_size
__snake_case = hidden_size
__snake_case = entity_emb_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = use_entity_aware_attention
__snake_case = classifier_dropout
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 680 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = 'ClapFeatureExtractor'
__SCREAMING_SNAKE_CASE = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[Any] , a_ : int , a_ : str ):
"""simple docstring"""
super().__init__(a_ , a_ )
def __call__( self : Any , a_ : Tuple=None , a_ : Optional[int]=None , a_ : int=None , **a_ : Optional[int] ):
"""simple docstring"""
__snake_case = kwargs.pop("sampling_rate" , a_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
__snake_case = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if audios is not None:
__snake_case = self.feature_extractor(
a_ , sampling_rate=a_ , return_tensors=a_ , **a_ )
if text is not None and audios is not None:
__snake_case = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def A ( self : str , *a_ : Dict , **a_ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def A ( self : List[Any] , *a_ : int , **a_ : Dict ):
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.tokenizer.model_input_names
__snake_case = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
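# A usage sketch (hypothetical, mirroring the upstream CLAP processor API; the
# class name in this file is obfuscated):
#
#     batch = processor(text=["a dog barking"], audios=waveform,
#                       sampling_rate=48_000, return_tensors="pt")
#     # -> token ids from the tokenizer plus "input_features" from the
#     #    feature extractor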
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Optional[int] = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
# the check below should work, but it currently does not (mostly a PyTorch issue);
# in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
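# A call-order sketch (hypothetical names mirroring the upstream accelerate API;
# the functions in this file are obfuscated): during checkpointing the model
# helper typically runs before the optimizer helper, and both honour
# fsdp_plugin.state_dict_type (full, local, or sharded), e.g.
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0)
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, 0)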
| 680 | 0 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
__snake_case = get_aligned_output_features_output_indices(lowercase__ , lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , ["c"] )
self.assertEqual(lowercase__ , [2] )
# Out indices set to match out features
__snake_case = get_aligned_output_features_output_indices(["a", "c"] , lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , ["a", "c"] )
self.assertEqual(lowercase__ , [0, 2] )
# Out features set to match out indices
__snake_case = get_aligned_output_features_output_indices(lowercase__ , [0, 2] , lowercase__ )
self.assertEqual(lowercase__ , ["a", "c"] )
self.assertEqual(lowercase__ , [0, 2] )
# Out features selected from negative indices
__snake_case = get_aligned_output_features_output_indices(lowercase__ , [-3, -1] , lowercase__ )
self.assertEqual(lowercase__ , ["a", "c"] )
self.assertEqual(lowercase__ , [-3, -1] )
def A ( self : Optional[int] ):
"""simple docstring"""
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def A ( self : Dict ):
"""simple docstring"""
        backbone = BackboneMixin()
        backbone.stage_names = ['''a''', '''b''', '''c''']
        backbone._out_features = ['''a''', '''c''']
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        backbone.out_features = ['''a''', '''b''']
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
| 703 |
'''simple docstring'''
def __UpperCAmelCase ( number : int , iterations : int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
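# Example: calling the function with number=1, iterations=15 returns "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "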
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
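    # Swapping the module object in sys.modules defers the heavy torch imports above until an attribute is first accessed.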
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 704 |
'''simple docstring'''
def __UpperCAmelCase ( number : int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ['''MaskFormerFeatureExtractor''']
    _import_structure["image_processing_maskformer"] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
    _import_structure["modeling_maskformer_swin"] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 705 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator( number : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
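# Example: 25 == 0b11001 has three set bits, so both counting functions return 3 for input 25.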
def benchmark() -> None:
    def do_benchmark(number : int ) -> None:
        setup = "import __main__ as z"
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
        timing = timeit(F'''z.get_set_bits_count_using_modulo_operator({number})''' , setup=setup )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
        timing = timeit(
            F'''z.get_set_bits_count_using_brian_kernighans_algorithm({number})''' , setup=setup , )
        print(F'''timeit() runs in {timing} seconds''' )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
    @classmethod
    def A ( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "transformers", "onnx"] )
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    @cached_property
    def teacher_config( self : Union[str, Any] ):
        """simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART )
    def A ( self : str ):
        """simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def A ( self : Optional[Any] ):
        """simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
    def A ( self : Dict ):
        """simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def A ( self : Optional[int] ):
        """simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def A ( self : Dict ):
        """simple docstring"""
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 680 | 0 |
'''simple docstring'''
def print_max_activities(start : list , finish : list ) -> None:
    n = len(finish )
    print("The following activities are selected:" )
    # The first activity is always selected
    i = 0
    print(i , end="," )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end="," )
            i = j
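# With the sample start/finish times below, the selected activity indices print as: 0,1,3,4,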
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( BaseTransformer ):
    mode = """sequence-classification"""
    def __init__( self : List[str] , hparams : str ):
        """simple docstring"""
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def main() -> Union[str, Any]:
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
def solution(min_total : int = 10**12 ) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
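# Example: solution(100) == 85, since 85 blue discs out of 120 total give exactly a 1/2 chance of drawing two blues.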
if __name__ == "__main__":
print(F'''{solution() = }''')
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size , input_in_memory_max_size , monkeypatch ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
assert result == expected
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance : list , src : int ) -> None:
    print(F'''Vertex\tShortest Distance from vertex {src}''' )
    for i, d in enumerate(distance ):
        print(F'''{i}\t\t{d}''' )
def check_negative_cycle(graph : list , distance : list , edge_count : int ) -> bool:
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph : list , vertex_count : int , edge_count : int , src : int ) -> list:
    distance = [float("inf" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("Negative cycle found" )
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph: list = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 709 |
'''simple docstring'''
def dodecahedron_surface_area( edge : float ) -> float:
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError("Length must be positive." )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge : float ) -> float:
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError("Length must be positive." )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
a = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE = """bridgetower_vision_model"""
def __init__( self : Optional[Any] , a_ : Any=768 , a_ : List[str]=12 , a_ : List[str]=3 , a_ : Union[str, Any]=16 , a_ : int=288 , a_ : int=1 , a_ : Tuple=1e-05 , a_ : Tuple=False , a_ : str=True , a_ : int=False , **a_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**_a )
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_channels
__snake_case = patch_size
__snake_case = image_size
__snake_case = initializer_factor
__snake_case = layer_norm_eps
__snake_case = stop_gradient
__snake_case = share_layernorm
__snake_case = remove_last_layer
@classmethod
def A ( cls : Dict , a_ : List[str] , **a_ : List[str] ):
"""simple docstring"""
__snake_case = cls.get_config_dict(_a , **_a )
if config_dict.get("model_type" ) == "bridgetower":
            __snake_case = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE = """bridgetower_text_model"""
def __init__( self : int , a_ : Optional[int]=50_265 , a_ : List[Any]=768 , a_ : List[Any]=12 , a_ : Union[str, Any]=12 , a_ : List[Any]=1 , a_ : str=3_072 , a_ : Optional[int]="gelu" , a_ : Dict=0.1 , a_ : Dict=0.1 , a_ : Union[str, Any]=514 , a_ : Union[str, Any]=1 , a_ : Optional[Any]=1e-05 , a_ : Dict=1 , a_ : Optional[Any]=0 , a_ : Any=2 , a_ : int="absolute" , a_ : Optional[int]=True , **a_ : str , ):
"""simple docstring"""
super().__init__(**_a )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = initializer_factor
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = pad_token_id
__snake_case = bos_token_id
__snake_case = eos_token_id
@classmethod
def A ( cls : List[str] , a_ : Union[str, Any] , **a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = cls.get_config_dict(_a , **_a )
if config_dict.get("model_type" ) == "bridgetower":
__snake_case = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE = """bridgetower"""
def __init__( self : List[str] , a_ : List[str]=True , a_ : List[str]="gelu" , a_ : Union[str, Any]=768 , a_ : Optional[Any]=1 , a_ : Union[str, Any]=1e-05 , a_ : str=False , a_ : int="add" , a_ : Dict=12 , a_ : Tuple=6 , a_ : List[str]=False , a_ : Union[str, Any]=False , a_ : Tuple=None , a_ : List[str]=None , **a_ : str , ):
"""simple docstring"""
__snake_case = kwargs.pop("text_config_dict" , _a )
__snake_case = kwargs.pop("vision_config_dict" , _a )
super().__init__(**_a )
__snake_case = share_cross_modal_transformer_layers
__snake_case = hidden_act
__snake_case = hidden_size
__snake_case = initializer_factor
__snake_case = layer_norm_eps
__snake_case = share_link_tower_layers
__snake_case = link_tower_type
__snake_case = num_attention_heads
__snake_case = num_hidden_layers
__snake_case = tie_word_embeddings
__snake_case = init_layernorm_from_vision_encoder
if text_config is None:
__snake_case = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
__snake_case = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
__snake_case = BridgeTowerTextConfig(**_a )
__snake_case = BridgeTowerVisionConfig(**_a )
@classmethod
def A ( cls : int , a_ : List[Any] , a_ : List[str] , **a_ : Dict ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = copy.deepcopy(self.__dict__ )
__snake_case = self.text_config.to_dict()
__snake_case = self.vision_config.to_dict()
__snake_case = self.__class__.model_type
return output
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( lat1 : float , lon1 : float , lat2 : float , lon2 : float ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
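# Illustrative usage (coordinates are approximate values for San Francisco and Yosemite):
#   distance_in_meters = __UpperCAmelCase(37.774856, -122.424227, 37.864742, -119.537521)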
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian( img : np.ndarray , variance : float ) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice( img : np.ndarray , x : int , y : int , kernel_size : int ) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size : int , spatial_variance : float ) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter( img : np.ndarray , spatial_variance : float , intensity_variance : float , kernel_size : int , ) -> np.ndarray:
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args( args : list ) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
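# Usage: python bilateral_filter.py [image_path] [spatial_variance] [intensity_variance] [kernel_size]
# Missing arguments fall back to the defaults above; an even kernel size is bumped up to the next odd number.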
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('''input image''', img)
    out = img / 255
    out = out.astype('''float32''')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('''output image''', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = CpmAntTokenizer
__SCREAMING_SNAKE_CASE = False
def A ( self : int ):
"""simple docstring"""
super().setUp()
__snake_case = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def A ( self : List[str] ):
"""simple docstring"""
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_ids )
        reconstructed_text = tokenizer.decode(input_ids )
        self.assertEqual(reconstructed_text , normalized_text )
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def build_menu(name : list , value : list , weight : list ) -> list:
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy(items : list , max_cost : float , key_function ) -> tuple:
    items_copy = sorted(items , key=key_function , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
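# Illustrative usage (menu values are made up):
#   menu = build_menu(["burger", "pizza", "soda"], [80, 100, 60], [40, 60, 40])
#   taken, value = greedy(menu, 100.0, Things.get_value)  # keeps pizza and burger, value == 180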
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key( k : str ) -> str:
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("encoder" ):
        k = k.replace(".attn" , ".self_attn" )
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "final_layer_norm" )
    elif k.startswith("decoder" ):
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "encoder_attn_layer_norm" )
        k = k.replace("norm3" , "final_layer_norm" )
    return k
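# Example mapping: "encoder.layers.0.attention.q_lin.weight" -> "encoder.layers.0.self_attn.q_proj.weight"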
def rename_layernorm_keys( sd : dict ) -> None:
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("layernorm_embedding" , "layer_norm" )
        assert new_k not in sd
        sd[new_k] = v
a : Tuple = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : str , config_json_path : str ) -> None:
    model = torch.load(checkpoint_path , map_location="cpu" )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 713 |
'''simple docstring'''
import os
from math import log10
def solution( data_file : str = "base_exp.txt" ) -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 680 | 0 |
'''simple docstring'''
def base16_encode( data : bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data : str ) -> bytes:
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
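# Example: base16_encode(b"HELLO") == "48454C4C4F" and base16_decode("48454C4C4F") == b"HELLO"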
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizerFast ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
    def __init__( self : List[Any] , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def A ( self : Dict , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def A ( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
a : Dict = namedtuple('''covid_data''', '''cases deaths recovered''')
def covid_stats( url : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    xpath_str = "//div[@class = \"maincounter-number\"]/span/text()"
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
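# The page's three "maincounter-number" divs hold total cases, deaths and recoveries, matching the namedtuple fields.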
fmt = '''Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats()))
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str ):
"""simple docstring"""
__snake_case = []
__snake_case = set()
def A ( self : int ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def A ( self : str ):
"""simple docstring"""
return len(self.elements ) == 0
    def A ( self : Dict , item : Any , priority : Any ):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def A ( self : List[Any] , item : Any ):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def A ( self : str ):
        """simple docstring"""
        return self.elements[0][1]
    def A ( self : Tuple ):
        """simple docstring"""
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
def A__ ( p , goal ) -> float:
    # euclidean distance
    a = np.array(p )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def A__ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
# integer division by time variable
return consistent_heuristic(_UpperCAmelCase , _UpperCAmelCase ) // t
def A__ ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> Any:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def A__ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> List[Any]:
__snake_case = g_function[start] + Wa * heuristics[i](_UpperCAmelCase , _UpperCAmelCase )
return ans
def A__ ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] ) -> Dict:
__snake_case = np.chararray((n, n) )
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
__snake_case = "*"
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (j, (n - 1) - i) in blocks:
__snake_case = "#"
__snake_case = "-"
__snake_case = back_pointer[goal]
while x != start:
((__snake_case) , (__snake_case)) = x
# print(x)
__snake_case = "-"
__snake_case = back_pointer[x]
__snake_case = "-"
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__snake_case = back_pointer[goal]
while x != start:
print(_UpperCAmelCase , end=" " )
__snake_case = back_pointer[x]
print(_UpperCAmelCase )
sys.exit()
def A__ ( _UpperCAmelCase : Dict ) -> str:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def A__ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , ) -> Any:
for itera in range(_UpperCAmelCase ):
open_list[itera].remove_element(_UpperCAmelCase )
# print("s", s)
# print("j", j)
((__snake_case) , (__snake_case)) = s
__snake_case = (x - 1, y)
__snake_case = (x + 1, y)
__snake_case = (x, y + 1)
__snake_case = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCAmelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCAmelCase )
__snake_case = -1
__snake_case = float("inf" )
if valid(_UpperCAmelCase ) and g_function[neighbours] > g_function[s] + 1:
__snake_case = g_function[s] + 1
__snake_case = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCAmelCase , key(_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCAmelCase ):
if key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) <= Wa * key(
_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase ):
open_list[j].put(
_UpperCAmelCase , key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
def A__ ( ) -> str:
__snake_case = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a : List[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a : str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a : Union[str, Any] = make_common_ground()
a : Any = blocks_blk
# hyper parameters
a : Union[str, Any] = 1
a : List[Any] = 1
a : List[Any] = 20
a : List[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a : Union[str, Any] = (0, 0)
a : Tuple = (n - 1, n - 1)
a : Optional[int] = 1
def A__ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> List[str]:
__snake_case = {start: 0, goal: float("inf" )}
__snake_case = {start: -1, goal: -1}
__snake_case = []
__snake_case = set()
for i in range(_UpperCAmelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCAmelCase , key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
__snake_case = []
__snake_case = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , _UpperCAmelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__snake_case , __snake_case = open_list[i].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
close_list_inad.append(_UpperCAmelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__snake_case = open_list[0].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
close_list_anchor.append(_UpperCAmelCase )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCAmelCase ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
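#
# As a rough sketch (values invented), the expansion above is just a cartesian
# product, the same thing main() below computes with itertools.product:
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', ..., '--tf32 1 --bf16']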
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
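# Minimal usage sketch (assumed): assigning a Tee to sys.stdout makes every
# print() land both on the console and in the report file, as main() intends:
#
#   sys.stdout = Tee("benchmark-report.txt")
#   print("shows up on the console and in benchmark-report.txt")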
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
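# For illustration only (hypothetical output): a long invocation gets wrapped
# with shell line continuations, so the reconstructed command looks like:
#
#   CUDA_VISIBLE_DEVICES=0 python ./trainer-benchmark.py \
#   --base-cmd ' ... ' --target-metric-key train_samples_per_second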
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
    __snake_case = re.sub(r"--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
    __snake_case = re.sub(r"--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
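# For reference, output_dir/all_results.json as written by the HF Trainer looks
# roughly like this (numbers invented):
#
#   {"train_loss": 2.51, "train_runtime": 70.2, "train_samples_per_second": 285.11}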
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
            lambda _UpperCAmelCase : round(1_00 * (_UpperCAmelCase[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(_UpperCAmelCase[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
    __snake_case = df.rename(lambda _UpperCAmelCase : _UpperCAmelCase.replace("_" , "<br>" ) , axis="columns" )
    __snake_case = df.rename(lambda _UpperCAmelCase : _UpperCAmelCase.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
    parser.add_argument(
        "--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
    __snake_case = [list(map(str.strip , re.split(R"\|" , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
    __snake_case = max(len(x ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
__snake_case = tmp_path / 'file.csv'
    __snake_case = textwrap.dedent(
        "\n header1,header2\n 1,2\n 10,20\n " )
with open(_UpperCAmelCase , "w" ) as f:
f.write(_UpperCAmelCase )
return str(_UpperCAmelCase )
@pytest.fixture
def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> Dict:
__snake_case = tmp_path / 'malformed_file.csv'
    __snake_case = textwrap.dedent(
        "\n header1,header2\n 1,2\n 10,20,\n " )
with open(_UpperCAmelCase , "w" ) as f:
f.write(_UpperCAmelCase )
return str(_UpperCAmelCase )
@pytest.fixture
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> Union[str, Any]:
__snake_case = tmp_path / 'csv_with_image.csv'
__snake_case = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(_UpperCAmelCase , "w" ) as f:
f.write(_UpperCAmelCase )
return str(_UpperCAmelCase )
@pytest.fixture
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
__snake_case = tmp_path / 'csv_with_label.csv'
    __snake_case = textwrap.dedent(
        "\n label\n good\n bad\n good\n " )
with open(_UpperCAmelCase , "w" ) as f:
f.write(_UpperCAmelCase )
return str(_UpperCAmelCase )
@pytest.fixture
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] ) -> Dict:
__snake_case = tmp_path / 'csv_with_int_list.csv'
    __snake_case = textwrap.dedent(
        "\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_UpperCAmelCase , "w" ) as f:
f.write(_UpperCAmelCase )
return str(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple ) -> Tuple:
__snake_case = Csv()
__snake_case = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_UpperCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_UpperCAmelCase ) in record.message
for record in caplog.records )
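# Note: Csv._generate_tables yields (key, pyarrow.Table) pairs, which is why the
# tests below unpack them as `for _, table in generator` before concatenating.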
@require_pil
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Optional[int]:
with open(_UpperCAmelCase , encoding="utf-8" ) as f:
__snake_case = f.read().splitlines()[1]
__snake_case = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
__snake_case = csv._generate_tables([[csv_file_with_image]] )
__snake_case = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
__snake_case = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> List[Any]:
with open(_UpperCAmelCase , encoding="utf-8" ) as f:
__snake_case = f.read().splitlines()[1:]
__snake_case = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
__snake_case = csv._generate_tables([[csv_file_with_label]] )
__snake_case = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
__snake_case = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> int:
    __snake_case = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda _UpperCAmelCase : [int(i ) for i in _UpperCAmelCase.split()]} )
__snake_case = csv._generate_tables([[csv_file_with_int_list]] )
__snake_case = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
__snake_case = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
    with pytest.raises(ValueError ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
    with pytest.raises(ValueError ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
| 680 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = FunnelTokenizer
__SCREAMING_SNAKE_CASE = FunnelTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
def A ( self : Tuple ):
"""simple docstring"""
super().setUp()
__snake_case = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def A ( self : List[str] , **a_ : Optional[Any] ):
"""simple docstring"""
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **a_ )
def A ( self : List[Any] , **a_ : List[Any] ):
"""simple docstring"""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
__snake_case = "UNwant\u00E9d,running"
__snake_case = "unwanted, running"
return input_text, output_text
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.tokenizer_class(self.vocab_file )
__snake_case = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__A , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [7, 4, 5, 10, 8, 9] )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
__snake_case = tokenizer("UNwant\u00E9d,running" )
__snake_case = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
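            # paired input: the [CLS] token keeps Funnel's token type 2 while the
            # first sequence gets 0s and the second 1s, as asserted below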
__snake_case = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
| 680 | 0 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Optional[int] = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Dict ):
"""simple docstring"""
return AutoConfig.from_pretrained(_lowercase )
def A ( self : int ):
"""simple docstring"""
__snake_case = create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : int ):
"""simple docstring"""
__snake_case = create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=1 , d=_lowercase )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=1 , d=_lowercase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : str ):
"""simple docstring"""
__snake_case = create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(_lowercase ):
create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=_lowercase , d=_lowercase )
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 680 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
a : List[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Union[str, Any] , *a_ : Dict , **a_ : Optional[int] ):
"""simple docstring"""
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*a_ , **a_ )
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
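            # PABEE early exit: evaluate the per-layer classifiers one layer at a
            # time and stop as soon as `patience` consecutive predictions agree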
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
| 680 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = JukeboxTokenizer
__SCREAMING_SNAKE_CASE = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def A ( self : Optional[Any] ):
"""simple docstring"""
import torch
__snake_case = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__snake_case = tokenizer(**self.metas )["input_ids"]
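        # the tokenizer returns one token tensor per prior level; judging by the
        # expected outputs below, only the first level carries the full lyrics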
# fmt: off
__snake_case = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def A ( self : Optional[Any] ):
"""simple docstring"""
import torch
__snake_case = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
__snake_case = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
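        # the dict above is the backbone_config for the hybrid variant: a small
        # BiT-style (bottleneck ResNet) backbone feeding the patch embeddings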
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
        with self.assertRaises(ValueError ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Dict = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 680 | 0 |
'''simple docstring'''
import numpy as np
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any ):
"""simple docstring"""
__snake_case = (0, 0)
__snake_case = None
__snake_case = 0
__snake_case = 0
__snake_case = 0
def __eq__( self : Union[str, Any] , a_ : str ):
"""simple docstring"""
        return self.position == a_.position
def A ( self : Any ):
"""simple docstring"""
print(self.position )
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Union[str, Any] , a_ : str=(5, 5) ):
"""simple docstring"""
__snake_case = np.zeros(a_ )
        __snake_case = a_[0]
        __snake_case = a_[1]
def A ( self : List[str] ):
"""simple docstring"""
print(self.w )
def A ( self : Optional[Any] , a_ : Any ):
"""simple docstring"""
__snake_case = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__snake_case = cell.position[0]
__snake_case = cell.position[1]
__snake_case = []
for n in neughbour_cord:
__snake_case = current_x + n[0]
__snake_case = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__snake_case = Cell()
__snake_case = (x, y)
__snake_case = cell
neighbours.append(a_ )
return neighbours
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str ) -> str:
__snake_case = []
__snake_case = []
_open.append(_UpperCAmelCase )
while _open:
__snake_case = np.argmin([n.f for n in _open] )
__snake_case = _open[min_f]
_closed.append(_open.pop(_UpperCAmelCase ) )
if current == goal:
break
for n in world.get_neigbours(_UpperCAmelCase ):
for c in _closed:
if c == n:
continue
__snake_case = current.g + 1
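            # heuristic h: squared Euclidean distance from the neighbour to the goal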
__snake_case , __snake_case = n.position
__snake_case , __snake_case = goal.position
__snake_case = (ya - ya) ** 2 + (xa - xa) ** 2
__snake_case = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_UpperCAmelCase )
__snake_case = []
while current.parent is not None:
path.append(current.position )
__snake_case = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
a : Any = Gridworld()
# Start position and goal
a : Tuple = Cell()
start.position = (0, 0)
a : Optional[int] = Cell()
goal.position = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
a : Union[str, Any] = astar(world, start, goal)
# Just for visual reasons.
for i in s:
    world.w[i] = 1  # mark the found path cells before printing the grid
print(world.w)
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
a : List[str] = 8.988e9  # units = N * m^2 * C^-2
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> dict[str, float]:
    __snake_case = abs(chargea * chargeb )
    if (force, chargea, chargeb, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if distance < 0:
raise ValueError("Distance cannot be negative" )
if force == 0:
__snake_case = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
    elif chargea == 0:
        __snake_case = abs(SCREAMING_SNAKE_CASE_ ) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        __snake_case = abs(SCREAMING_SNAKE_CASE_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
elif distance == 0:
__snake_case = (COULOMBS_CONSTANT * charge_product / abs(SCREAMING_SNAKE_CASE_ )) ** 0.5
return {"distance": distance}
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
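# Rough sketch of the on-disk layout produced by the three branches above (file
# names assume accelerate's MODEL_NAME constant is "pytorch_model"; treat this as
# an illustration, not a spec):
#   FULL_STATE_DICT    -> <output_dir>/pytorch_model.bin, written by rank 0 only
#   LOCAL_STATE_DICT   -> <output_dir>/pytorch_model_rank<i>.bin, one file per rank
#   SHARDED_STATE_DICT -> <output_dir>/pytorch_model_0/, a distributed checkpoint dir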
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
            # below check should work but currently it isn't working (mostly pytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
| 680 | 0 |
'''simple docstring'''
import math
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] = 0 , _UpperCAmelCase : int = 0 ) -> list:
__snake_case = end or len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = i
__snake_case = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__snake_case = array[temp_index - 1]
temp_index -= 1
__snake_case = temp_index_value
return array
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ) -> None: # Max Heap
__snake_case = index
__snake_case = 2 * index + 1 # Left Node
__snake_case = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__snake_case = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__snake_case = right_index
if largest != index:
        array[index], array[largest] = array[largest], array[index]  # swap parent with larger child
heapify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> list:
__snake_case = len(SCREAMING_SNAKE_CASE_ )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]  # move the current max behind the sorted tail
heapify(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ )
return array
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : int ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> int:
__snake_case = low
__snake_case = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
        array[i], array[j] = array[j], array[i]
i += 1
def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> list:
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return array
    __snake_case = 2 * math.ceil(math.log2(len(SCREAMING_SNAKE_CASE_ ) ) )
__snake_case = 16
return intro_sort(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE_ )
max_depth -= 1
__snake_case = median_of_a(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , start + ((end - start) // 2) + 1 , end - 1 )
__snake_case = partition(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
intro_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = p
return insertion_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
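# Illustrative end-to-end call (assuming the upstream name `sort` for the entry
# point defined above):
#     sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#     -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]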
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Optional[Any] = input('''Enter numbers separated by a comma : ''').strip()
a : List[Any] = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 703 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__snake_case = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
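# Illustrative call (assuming the upstream name `fizz_buzz` for the mangled def above):
#     fizz_buzz(number=1, iterations=7) -> '1 2 Fizz 4 Buzz Fizz 7 '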
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( __A , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = ProphetNetTokenizer
__SCREAMING_SNAKE_CASE = False
def A ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
__snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def A ( self : Any , a_ : List[Any] ):
"""simple docstring"""
__snake_case = 'UNwant\u00E9d,running'
__snake_case = 'unwanted, running'
return input_text, output_text
def A ( self : str ):
"""simple docstring"""
__snake_case = self.tokenizer_class(self.vocab_file )
__snake_case = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [9, 6, 7, 12, 10, 11] )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def A ( self : Any ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : Any ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = BasicTokenizer(do_lower_case=a_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__snake_case = {}
for i, token in enumerate(a_ ):
            vocab[token] = i
__snake_case = WordpieceTokenizer(vocab=a_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
__snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
__snake_case = tokenizer(a_ , padding=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
__snake_case = list(batch.input_ids.numpy()[0] )
self.assertListEqual(a_ , a_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def A ( self : Dict ):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def A ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def A ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
__snake_case = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
__snake_case = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
__snake_case = tokenizer.build_inputs_with_special_tokens(a_ )
__snake_case = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
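        # Note inferred from the asserts above: unlike BERT, this tokenizer appends
        # only a trailing [SEP] (id 102) per sequence and adds no leading [CLS].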
| 704 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
if number > 0:
raise ValueError("input must be a negative integer" )
__snake_case = len(bin(_UpperCAmelCase )[3:] )
__snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
__snake_case = (
(
"1"
+ "0" * (binary_number_length - len(_UpperCAmelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
a : Tuple = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : int ) -> Optional[Any]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] ) -> Optional[int]:
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=__snake_case )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
__snake_case = tmp_path_factory.getbasetemp() / "cache"
__snake_case = test_hf_cache_home / "datasets"
__snake_case = test_hf_cache_home / "metrics"
__snake_case = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(__snake_case ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(__snake_case ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(__snake_case ) )
__snake_case = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(__snake_case ) )
__snake_case = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(__snake_case ) )
@pytest.fixture(autouse=__snake_case , scope="session" )
def __UpperCAmelCase ( ) -> Tuple:
datasets.disable_progress_bar()
@pytest.fixture(autouse=__snake_case )
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int:
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , __snake_case )
@pytest.fixture
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> List[str]:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , __snake_case )
| 705 |
'''simple docstring'''
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
number &= number - 1
result += 1
return result
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
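# Quick agreement check for the two implementations above (the readable names are
# taken from the benchmark code below):
#     25 == 0b11001 has three set bits, so
#     get_set_bits_count_using_modulo_operator(25) == 3
#     get_set_bits_count_using_brian_kernighans_algorithm(25) == 3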
def __UpperCAmelCase ( ) -> None:
def do_benchmark(_UpperCAmelCase : int ) -> None:
__snake_case = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
__snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
        __snake_case = timeit(
            F'''z.get_set_bits_count_using_brian_kernighans_algorithm({number})''' , setup=_UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] ) -> int:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple=True ) -> Union[str, Any]:
model.train()
__snake_case = model(lowerCAmelCase__ )
__snake_case = F.mse_loss(lowerCAmelCase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCAmelCase__ )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=False ) -> Tuple:
set_seed(42 )
__snake_case = RegressionModel()
__snake_case = deepcopy(lowerCAmelCase__ )
__snake_case = RegressionDataset(length=80 )
__snake_case = DataLoader(lowerCAmelCase__ , batch_size=16 )
model.to(accelerator.device )
if sched:
__snake_case = AdamW(params=model.parameters() , lr=1E-3 )
__snake_case = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        __snake_case = LambdaLR(lowerCAmelCase__ , lr_lambda=lambda epoch : epoch**0.65 )
        __snake_case = LambdaLR(lowerCAmelCase__ , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
__snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
__snake_case , __snake_case = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> str:
# Test when on a single CPU or GPU that the context manager does nothing
__snake_case , __snake_case , __snake_case = get_training_setup(lowerCAmelCase__ )
# Use a single batch
__snake_case , __snake_case = next(iter(lowerCAmelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase__ ):
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# Sync grads
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__snake_case = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]
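# Minimal sketch of the pattern the test above exercises (assumes a prepared
# `ddp_model` and an `accelerator`): inside `no_sync`, backward() skips the
# gradient all-reduce, so grads accumulate locally until the next synced step.
#
#     with accelerator.no_sync(ddp_model):
#         loss_a.backward()   # local accumulation only
#     loss_b.backward()       # this backward triggers the all-reduce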
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> Dict:
# Test on distributed setup that context manager behaves properly
__snake_case , __snake_case , __snake_case = get_training_setup(lowerCAmelCase__ )
# Use a single batch
__snake_case , __snake_case = next(iter(lowerCAmelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase__ ):
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# Sync grads
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__snake_case = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]
def __UpperCAmelCase ( _UpperCAmelCase : List[str]=False , _UpperCAmelCase : int=False ) -> int:
__snake_case = Accelerator(
split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__snake_case , __snake_case , __snake_case = get_training_setup(lowerCAmelCase__ )
for iteration, batch in enumerate(lowerCAmelCase__ ):
__snake_case , __snake_case = batch.values()
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCAmelCase__ ):
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__snake_case = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]
GradientState._reset_state()
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Tuple=False ) -> Optional[Any]:
__snake_case = Accelerator(
split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = get_training_setup(lowerCAmelCase__ , lowerCAmelCase__ )
for iteration, batch in enumerate(lowerCAmelCase__ ):
__snake_case , __snake_case = batch.values()
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCAmelCase__ ):
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
__snake_case = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def __UpperCAmelCase ( ) -> str:
__snake_case = Accelerator()
__snake_case = RegressionDataset(length=80 )
__snake_case = DataLoader(lowerCAmelCase__ , batch_size=16 )
__snake_case = RegressionDataset(length=96 )
__snake_case = DataLoader(lowerCAmelCase__ , batch_size=16 )
__snake_case , __snake_case = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCAmelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase__ )
if iteration < len(lowerCAmelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCAmelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase__ )
if batch_num < len(lowerCAmelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __UpperCAmelCase ( ) -> Tuple:
__snake_case = Accelerator()
__snake_case = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowerCAmelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowerCAmelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(lowerCAmelCase__ , lowerCAmelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase__ , lowerCAmelCase__ )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ):
create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
| 680 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : Optional[Any] = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE = '''wav2vec2'''
def __init__( self : Dict , a_ : Union[str, Any]=32 , a_ : Optional[int]=768 , a_ : Optional[int]=12 , a_ : Tuple=12 , a_ : int=3_072 , a_ : Optional[int]="gelu" , a_ : Any=0.1 , a_ : Dict=0.1 , a_ : Any=0.1 , a_ : Optional[int]=0.0 , a_ : Any=0.0 , a_ : Union[str, Any]=0.1 , a_ : int=0.1 , a_ : List[Any]=0.02 , a_ : Optional[Any]=1e-5 , a_ : List[str]="group" , a_ : Union[str, Any]="gelu" , a_ : Dict=(512, 512, 512, 512, 512, 512, 512) , a_ : Any=(5, 2, 2, 2, 2, 2, 2) , a_ : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , a_ : str=False , a_ : Optional[int]=128 , a_ : Tuple=16 , a_ : Dict=False , a_ : str=True , a_ : Optional[int]=0.05 , a_ : Any=10 , a_ : List[Any]=2 , a_ : List[str]=0.0 , a_ : Optional[int]=10 , a_ : Optional[int]=0 , a_ : Optional[Any]=320 , a_ : Optional[Any]=2 , a_ : Tuple=0.1 , a_ : List[Any]=100 , a_ : List[Any]=256 , a_ : List[str]=256 , a_ : Dict=0.1 , a_ : List[str]="sum" , a_ : List[Any]=False , a_ : Dict=False , a_ : str=256 , a_ : List[Any]=(512, 512, 512, 512, 1_500) , a_ : Dict=(5, 3, 3, 1, 1) , a_ : Union[str, Any]=(1, 2, 3, 1, 1) , a_ : Optional[int]=512 , a_ : Optional[Any]=0 , a_ : Optional[Any]=1 , a_ : Dict=2 , a_ : str=False , a_ : str=3 , a_ : Tuple=2 , a_ : Optional[int]=3 , a_ : Dict=None , a_ : Any=None , **a_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
__snake_case = hidden_size
__snake_case = feat_extract_norm
__snake_case = feat_extract_activation
__snake_case = list(a_ )
__snake_case = list(a_ )
__snake_case = list(a_ )
__snake_case = conv_bias
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layerdrop
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = vocab_size
__snake_case = do_stable_layer_norm
__snake_case = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = apply_spec_augment
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
__snake_case = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__snake_case = num_codevectors_per_group
__snake_case = num_codevector_groups
__snake_case = contrastive_logits_temperature
__snake_case = feat_quantizer_dropout
__snake_case = num_negatives
__snake_case = codevector_dim
__snake_case = proj_codevector_dim
__snake_case = diversity_loss_weight
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# adapter
__snake_case = add_adapter
__snake_case = adapter_kernel_size
__snake_case = adapter_stride
__snake_case = num_adapter_layers
__snake_case = output_hidden_size or hidden_size
__snake_case = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case = list(a_ )
__snake_case = list(a_ )
__snake_case = list(a_ )
__snake_case = xvector_output_dim
@property
def A ( self : List[str] ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
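    # Worked example: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the
    # property above returns 5 * 2**6 = 320, i.e. one feature frame per 320 input
    # waveform samples (20 ms of audio at a 16 kHz sampling rate).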
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
        ret["log"] = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
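# Illustrative invocation (the script name and the flags not defined in this file
# come from add_generic_args and are assumed here, not verified):
#     python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#         --model_name_or_path bert-base-cased --output_dir ./results --do_predict --gpus 1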
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __UpperCAmelCase ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=True ) -> Optional[int]:
model.train()
__snake_case = model(_snake_case )
__snake_case = F.mse_loss(_snake_case , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_snake_case )
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=False ) -> List[Any]:
set_seed(42 )
__snake_case = RegressionModel()
__snake_case = deepcopy(_snake_case )
__snake_case = RegressionDataset(length=80 )
__snake_case = DataLoader(_snake_case , batch_size=16 )
model.to(accelerator.device )
if sched:
__snake_case = AdamW(params=model.parameters() , lr=1E-3 )
__snake_case = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        __snake_case = LambdaLR(_snake_case , lr_lambda=lambda epoch : epoch**0.65 )
        __snake_case = LambdaLR(_snake_case , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
__snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(_snake_case , _snake_case , _snake_case , _snake_case )
else:
__snake_case , __snake_case = accelerator.prepare(_snake_case , _snake_case )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Optional[int]:
__snake_case , __snake_case , __snake_case = get_training_setup(_snake_case )
# Use a single batch
__snake_case , __snake_case = next(iter(_snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_snake_case ):
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
else:
# Sync grads
step_model(_snake_case , _snake_case , _snake_case , _snake_case )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_snake_case , _snake_case , _snake_case , _snake_case )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__snake_case = ddp_input[torch.randperm(len(_snake_case ) )]
def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
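
# Usage sketch (hypothetical invocation): this script is designed to be run
# once per process under the `accelerate` launcher so that the distributed
# branches of `main()` above are actually exercised, e.g.:
#
#   accelerate launch --num_processes 2 test_sync.py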
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)

    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size

    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False

    result = is_small_dataset(dataset_size)
    assert result == expected
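
# For reference, a minimal sketch of what `is_small_dataset` checks (assuming
# the current `datasets` behavior): a dataset counts as "small" only when its
# size is known and strictly below the configured in-memory ceiling, where a
# ceiling of 0 means "never keep datasets in memory".
#
#   import datasets.config
#   from datasets.utils.info_utils import is_small_dataset
#
#   datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # 500 MiB
#   assert is_small_dataset(400 * 2**20) is True
#   assert is_small_dataset(600 * 2**20) is False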
| 680 | 0 |
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). WARNING: this is not a security sandbox; untrusted
    code should still be run inside proper isolation.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
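
# Usage sketch (the candidate program below is illustrative). Note that
# `reliability_guard` is a best-effort guard, not a security sandbox, so
# untrusted completions should still be executed inside proper isolation.
#
#   if __name__ == "__main__":
#       program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
#       print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))
#       # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}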
| 709 |
'''simple docstring'''
def dodecahedron_surface_area(edge: float) -> float:
    """Calculates the surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Calculates the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
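
# Spot checks (values rounded to two decimals):
#
#   >>> round(dodecahedron_surface_area(5), 2)
#   516.14
#   >>> round(dodecahedron_volume(5), 2)
#   957.89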
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
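
# Usage sketch: default construction mirrors the SEW-D base architecture, and
# the derived quantities follow from the convolutional feature extractor above.
#
#   config = SEWDConfig()
#   assert config.num_feat_extract_layers == 13
#   assert config.inputs_to_logits_ratio == 320  # product of conv_stride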
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
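
# Usage sketch (coordinates are illustrative; the result is in metres): the
# distance between San Francisco (37.77, -122.42) and New York (40.71, -74.01)
# comes out to roughly 4.14e6 m on this ellipsoid.
#
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)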
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 2_048,
'''AI-Sweden/gpt-sw3-350m''': 2_048,
'''AI-Sweden/gpt-sw3-1.6b''': 2_048,
'''AI-Sweden/gpt-sw3-6.7b''': 2_048,
'''AI-Sweden/gpt-sw3-20b''': 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8_203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text
    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
| 711 |
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
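

# A bilateral filter smooths an image while preserving edges: each output pixel
# is a normalized weighted average of its neighbourhood, where each weight is
# the product of a spatial Gaussian (distance from the centre pixel) and an
# intensity Gaussian (difference from the centre pixel's value).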
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # kernel size must be odd
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
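# Project Euler problem 23 (https://projecteuler.net/problem=23): find the sum
# of all positive integers which cannot be written as the sum of two abundant
# numbers. Every integer greater than 28123 can be, hence the default limit.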
def solution(limit: int = 2_81_23) -> int:
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
| 712 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
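

# Usage sketch (menu values are illustrative):
#
#   food = ["Burger", "Pizza", "Coca Cola"]
#   value = [80, 100, 60]
#   weight = [40, 60, 10]
#   foods = build_menu(food, value, weight)
#   items, total_value = greedy(foods, 60.0, Things.get_value)
#   # picks the highest-value items that still fit under the 60.0 weight budget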
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 713 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
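
# Project Euler problem 99: the data file holds one "base,exponent" pair per
# line, e.g. "519432,525806". Comparing x * log10(a) ranks the values of a**x
# without ever computing the astronomically large powers themselves.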
if __name__ == "__main__":
print(solution())
| 680 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak a fairseq BART checkpoint's weights into the transformers structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
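
# Example invocation (script name and paths are illustrative):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn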
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
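
# Usage sketch (downloads the hub checkpoint on first use):
#
#   tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   tokenizer("sam is wearing a hat")["input_ids"]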
| 680 | 0 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = 10
__snake_case = self.get_scheduler_config()
__snake_case = self.scheduler_classes[0](**a_ )
scheduler.set_timesteps(a_ )
__snake_case = scheduler.timesteps[0]
__snake_case = scheduler.timesteps[1]
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = scheduler.step(a_ , a_ , a_ ).prev_sample
__snake_case = scheduler.step(a_ , a_ , a_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : Dict ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a_ )
def A ( self : str ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = 1
scheduler.set_timesteps(a_ )
__snake_case = scheduler.timesteps
__snake_case = torch.manual_seed(0 )
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a_ ):
# 1. scale model input
__snake_case = scheduler.scale_model_input(a_ , a_ )
# 2. predict noise residual
__snake_case = model(a_ , a_ )
# 3. predict previous sample x_t-1
__snake_case = scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
__snake_case = pred_prev_sample
__snake_case = torch.sum(torch.abs(a_ ) )
__snake_case = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = [106, 0]
scheduler.set_timesteps(timesteps=a_ )
__snake_case = scheduler.timesteps
__snake_case = torch.manual_seed(0 )
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__snake_case = scheduler.scale_model_input(a_ , a_ )
# 2. predict noise residual
__snake_case = model(a_ , a_ )
# 3. predict previous sample x_t-1
__snake_case = scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
__snake_case = pred_prev_sample
__snake_case = torch.sum(torch.abs(a_ ) )
__snake_case = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = [39, 30, 12, 15, 0]
with self.assertRaises(a_ , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = [39, 30, 12, 1, 0]
__snake_case = len(a_ )
with self.assertRaises(a_ , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=a_ , timesteps=a_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=a_ )
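
# The loop pattern exercised above mirrors typical scheduler usage: scale the
# model input for timestep t, predict the residual, then call `step` to get
# the previous sample. A minimal sketch (`model` and `sample` are placeholders):
#
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       scaled = scheduler.scale_model_input(sample, t)
#       residual = model(scaled, t)
#       sample = scheduler.step(residual, t, sample).prev_sample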
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
import functools
def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 3_66:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 3_65:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
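
# Usage sketch (LeetCode 983-style example): with passes costing 2, 7 and 15
# for 1, 7 and 30 days respectively,
#
#   >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
#   11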
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, nicely wrapped for `max_width` chars."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 1_00) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
    __snake_case = [list(map(str.strip , re.split(R"\|" , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
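    # Illustration with hypothetical inputs (not from the original invocation): for
    # args.variations == ["|--fp16|--bf16", "|--tf32"], the split two lines up yields
    # [["", "--fp16", "--bf16"], ["", "--tf32"]] and the joined product on the line
    # above yields ["", "--tf32", "--fp16", "--fp16 --tf32", "--bf16", "--bf16 --tf32"]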
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
a : int = logging.get_logger(__name__)
a : Any = r'''
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

'''
class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self : Optional[int] , a_ : Tuple , a_ : Tuple , **a_ : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
def __init__( self : str , a_ : int , a_ : List[Any] = None ):
"""simple docstring"""
__snake_case = max_length
__snake_case = max_position_embeddings
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self : Any , a_ : List[str] , a_ : Optional[int] , **a_ : List[str] ):
"""simple docstring"""
__snake_case = input_ids.shape[-1]
__snake_case = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"exceptions, performance degradation, or nothing at all." )
return is_done
class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
def __init__( self : Optional[Any] , a_ : Tuple , a_ : str ):
"""simple docstring"""
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"with `max_length = start_length + max_new_tokens` instead." , lowerCAmelCase_ , )
__snake_case = start_length
__snake_case = max_new_tokens
__snake_case = start_length + max_new_tokens
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self : Any , a_ : Optional[Any] , a_ : int , **a_ : List[str] ):
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
def __init__( self : List[Any] , a_ : Optional[int] , a_ : int = None ):
"""simple docstring"""
__snake_case = max_time
__snake_case = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self : List[str] , a_ : Optional[Any] , a_ : List[Any] , **a_ : Dict ):
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self : Tuple , a_ : int , a_ : Optional[Any] , **a_ : Optional[Any] ):
"""simple docstring"""
return any(criteria(lowerCAmelCase_ , lowerCAmelCase_ ) for criteria in self )
@property
def A ( self : Tuple ):
"""simple docstring"""
for stopping_criterium in self:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return stopping_criterium.max_length
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return stopping_criterium.max_length
return None
def __UpperCAmelCase ( _UpperCAmelCase : StoppingCriteriaList , _UpperCAmelCase : int ) -> StoppingCriteriaList:
__snake_case = stopping_criteria.max_length
__snake_case = deepcopy(snake_case__ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , snake_case__ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=snake_case__ ) )
return new_stopping_criteria
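# Minimal usage sketch, commented out because it needs a loaded model. It uses the
# upstream `transformers` names that the anonymized classes above correspond to
# (`MaxLengthCriteria`, `MaxTimeCriteria`, `StoppingCriteriaList`); `model` and
# `input_ids` are assumed to exist:
#
#     from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList
#
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64), MaxTimeCriteria(max_time=5.0)])
#     outputs = model.generate(input_ids, stopping_criteria=criteria)
#     # generation halts as soon as *any* criterion returns True (see __call__ above)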
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
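# Illustration of the `map_nested` semantics exercised above (hypothetical call;
# `add_one` stands for a function that increments its argument, like the helper at
# the top of this file): the function is applied to every leaf while the nesting
# structure is preserved:
#
#     map_nested(add_one, {"a": [1, 2], "b": {"1": 3}})  # -> {"a": [2, 3], "b": {"1": 4}}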
| 680 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
a : Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = ['input_features', 'attention_mask']
def __init__( self : List[str] , a_ : List[str]=80 , a_ : str=16_000 , a_ : int=0.0 , a_ : Union[str, Any]=10 , a_ : str=25 , a_ : List[str]="hamming_window" , a_ : Union[str, Any]=32_768.0 , a_ : List[str]=0.97 , a_ : Optional[Any]=1.0 , a_ : Tuple=True , a_ : Dict=True , a_ : Optional[Any]=False , **a_ : Optional[int] , ):
"""simple docstring"""
super().__init__(feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ )
__snake_case = feature_size
__snake_case = sampling_rate
__snake_case = padding_value
__snake_case = hop_length
__snake_case = win_length
__snake_case = frame_signal_scale
__snake_case = preemphasis_coeff
__snake_case = mel_floor
__snake_case = normalize_means
__snake_case = normalize_vars
__snake_case = win_function
__snake_case = return_attention_mask
__snake_case = win_length * sampling_rate // 1_000
__snake_case = hop_length * sampling_rate // 1_000
__snake_case = optimal_fft_length(self.sample_size )
__snake_case = (self.n_fft // 2) + 1
def A ( self : Tuple , a_ : np.array ):
"""simple docstring"""
if self.win_function == "hamming_window":
__snake_case = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCAmelCase__ )
else:
__snake_case = window_function(window_length=self.sample_size , name=self.win_function )
__snake_case = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__snake_case = spectrogram(
one_waveform * self.frame_signal_scale , window=UpperCAmelCase__ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=UpperCAmelCase__ , preemphasis=self.preemphasis_coeff , mel_filters=UpperCAmelCase__ , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def A ( self : str , a_ : str , a_ : Dict , a_ : Tuple ):
"""simple docstring"""
if self.normalize_means:
__snake_case = x[:input_length].mean(axis=0 )
__snake_case = np.subtract(UpperCAmelCase__ , UpperCAmelCase__ )
if self.normalize_vars:
__snake_case = x[:input_length].std(axis=0 )
__snake_case = np.divide(UpperCAmelCase__ , UpperCAmelCase__ )
if input_length < x.shape[0]:
__snake_case = padding_value
# make sure array is in float32
__snake_case = x.astype(np.floataa )
return x
def A ( self : Dict , a_ : List[np.ndarray] , a_ : Optional[np.ndarray] = None ):
"""simple docstring"""
__snake_case = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(UpperCAmelCase__ , UpperCAmelCase__ , self.padding_value ) for x, n in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
def __call__( self : Optional[Any] , a_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Optional[int] = None , a_ : bool = False , a_ : Optional[int] = None , a_ : Optional[bool] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[int] = None , **a_ : Optional[Any] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__snake_case = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__snake_case = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
__snake_case = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__snake_case = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case = [raw_speech]
# extract fbank features
__snake_case = [self._extract_mfsc_features(UpperCAmelCase__ ) for one_waveform in raw_speech]
# convert into correct format for padding
__snake_case = BatchFeature({"input_features": features} )
__snake_case = self.pad(
UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
# make sure list is in array format
__snake_case = padded_inputs.get("input_features" )
if isinstance(input_features[0] , UpperCAmelCase__ ):
__snake_case = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in input_features]
__snake_case = padded_inputs.get("attention_mask" )
if attention_mask is not None:
__snake_case = [np.asarray(UpperCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__snake_case = (
np.array(UpperCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__snake_case = self.normalize(
padded_inputs["input_features"] , attention_mask=UpperCAmelCase__ )
if return_tensors is not None:
__snake_case = padded_inputs.convert_to_tensors(UpperCAmelCase__ )
return padded_inputs
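# Minimal usage sketch (hypothetical values; the keyword names follow the original,
# pre-anonymization signature of the feature extractor defined above):
#
#     import numpy as np
#
#     extractor = SCREAMING_SNAKE_CASE__(feature_size=80, sampling_rate=16_000, padding_value=0.0)
#     speech = np.zeros(16_000, dtype=np.float32)  # one second of silence
#     batch = extractor(speech, sampling_rate=16_000, padding=True, return_tensors="np")
#     # batch["input_features"] has shape (1, num_frames, feature_size)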
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
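# Usage sketch of the configuration class above (hypothetical values; keyword names
# follow the original signature):
#
#     config = SCREAMING_SNAKE_CASE__(depth_multiplier=1.4, image_size=224)
#
# `depth_multiplier` scales the channel count of every layer; values <= 0 raise a
# ValueError, as enforced in __init__ above. The two classes in this file share the
# anonymized name, so the sketch refers to the first (configuration) definition.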
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a : str = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = ["""LayoutLMv2FeatureExtractor"""]
a : str = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
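# Behaviour note (illustration): at runtime `_LazyModule` replaces this module in
# `sys.modules`, so importing the package stays cheap and a class listed in the
# import structure above is only really imported on first attribute access, e.g.
#
#     from transformers.models.layoutlmv2 import LayoutLMv2Config  # triggers the import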
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
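# Illustration of the ONNX input spec returned above: for any task other than
# "multiple-choice" the property evaluates to
#     OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                  ("attention_mask", {0: "batch", 1: "sequence"})])
# i.e. both tensor inputs are exported with dynamic batch and sequence axes.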
| 680 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a : Union[str, Any] = '''CompVis/stable-diffusion-v1-1'''
a : List[Any] = '''CompVis/stable-diffusion-v1-2'''
a : List[str] = '''CompVis/stable-diffusion-v1-3'''
a : Tuple = '''CompVis/stable-diffusion-v1-4'''
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ):
def __init__( self : List[str] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : List[str] , a_ : Tuple , a_ : str , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : int = True , ):
"""simple docstring"""
        super().__init__()
__snake_case = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = StableDiffusionPipeline(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , requires_safety_checker=SCREAMING_SNAKE_CASE_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A ( self : List[Any] ):
"""simple docstring"""
return {k: getattr(self , SCREAMING_SNAKE_CASE_ ) for k in self.config.keys() if not k.startswith("_" )}
def A ( self : str , a_ : List[str] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def A ( self : int ):
"""simple docstring"""
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def A ( self : Tuple , a_ : Dict , a_ : Optional[Any] = 512 , a_ : Union[str, Any] = 512 , a_ : int = 50 , a_ : Any = 7.5 , a_ : List[Any] = None , a_ : int = 1 , a_ : Optional[Any] = 0.0 , a_ : Tuple = None , a_ : Union[str, Any] = None , a_ : Any = "pil" , a_ : str = True , a_ : Union[str, Any] = None , a_ : List[str] = 1 , **a_ : Union[str, Any] , ):
"""simple docstring"""
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def A ( self : List[str] , a_ : Optional[Any] , a_ : Optional[int] = 512 , a_ : Tuple = 512 , a_ : Union[str, Any] = 50 , a_ : Tuple = 7.5 , a_ : Optional[int] = None , a_ : Optional[Any] = 1 , a_ : List[Any] = 0.0 , a_ : Any = None , a_ : List[Any] = None , a_ : Union[str, Any] = "pil" , a_ : Dict = True , a_ : Any = None , a_ : List[Any] = 1 , **a_ : int , ):
"""simple docstring"""
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def A ( self : Union[str, Any] , a_ : Any , a_ : int = 512 , a_ : Tuple = 512 , a_ : Optional[int] = 50 , a_ : Dict = 7.5 , a_ : Optional[Any] = None , a_ : int = 1 , a_ : Any = 0.0 , a_ : Optional[int] = None , a_ : str = None , a_ : Tuple = "pil" , a_ : int = True , a_ : Tuple = None , a_ : Dict = 1 , **a_ : List[Any] , ):
"""simple docstring"""
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def A ( self : List[Any] , a_ : str , a_ : Union[str, Any] = 512 , a_ : str = 512 , a_ : Dict = 50 , a_ : str = 7.5 , a_ : Optional[Any] = None , a_ : List[str] = 1 , a_ : Union[str, Any] = 0.0 , a_ : List[Any] = None , a_ : Dict = None , a_ : List[str] = "pil" , a_ : str = True , a_ : Dict = None , a_ : str = 1 , **a_ : Optional[Any] , ):
"""simple docstring"""
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def A ( self : Any , a_ : Union[str, Any] , a_ : str = 512 , a_ : Dict = 512 , a_ : Union[str, Any] = 50 , a_ : Tuple = 7.5 , a_ : Optional[int] = None , a_ : Union[str, Any] = 1 , a_ : List[str] = 0.0 , a_ : Tuple = None , a_ : Optional[Any] = None , a_ : List[str] = "pil" , a_ : List[Any] = True , a_ : Union[str, Any] = None , a_ : Union[str, Any] = 1 , **a_ : Dict , ):
"""simple docstring"""
__snake_case = "cuda" if torch.cuda.is_available() else "cpu"
self.to(SCREAMING_SNAKE_CASE_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
__snake_case = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
__snake_case = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
__snake_case = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
__snake_case = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
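# Behaviour sketch of the method above (hypothetical `pipe` instance): one prompt is
# run with identical arguments through all four checkpoints (v1.1-v1.4) and the four
# results are packed into a single StableDiffusionPipelineOutput:
#
#     out = pipe(prompt="an astronaut riding a horse", height=512, width=512)
#     # out.images holds one entry per checkpoint, in order v1.1 -> v1.4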
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
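# Usage sketch of the patience-based early exit (commented out; uses the upstream
# PABEE helper names that the anonymized `A` methods above correspond to, and
# assumes `model` is an instance of the sequence-classification class defined here):
#
#     model.bert.set_patience(3)    # exit once 3 consecutive classifiers agree
#     model.bert.reset_stats()
#     logits = model(input_ids=input_ids)[0]
#     model.bert.log_stats()        # prints the average number of layers used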
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a : Optional[int] = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
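# Common follow-up to the integration test above (sketch; mirrors the DPT usage
# docs rather than this test file): upsample the predicted depth map back to the
# original image resolution.
#
#     import torch
#
#     prediction = torch.nn.functional.interpolate(
#         predicted_depth.unsqueeze(1),   # (1, 1, 384, 384)
#         size=image.size[::-1],          # PIL size is (width, height)
#         mode="bicubic",
#         align_corners=False,
#     ).squeeze()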
| 680 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> List[str]:
# initialize config
if "resnet-50" in model_name:
__snake_case = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
__snake_case = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
__snake_case = DetrConfig(use_timm_backbone=_UpperCAmelCase , backbone_config=_UpperCAmelCase )
# set label attributes
__snake_case = '''panoptic''' in model_name
if is_panoptic:
__snake_case = 2_50
else:
__snake_case = 91
__snake_case = '''huggingface/label-files'''
__snake_case = '''coco-detection-id2label.json'''
__snake_case = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
__snake_case = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> str:
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
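# Illustration (not part of the conversion script): rename_key moves a tensor to its
# new name in place, which is how each (src, dest) pair from create_rename_keys is applied.
#   >>> sd = {"query_embed.weight": "w"}
#   >>> rename_key(sd, "query_embed.weight", "query_position_embeddings.weight")
#   >>> sd
#   {'query_position_embeddings.weight': 'w'}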
def read_in_q_k_v( state_dict , is_panoptic=False ):
    prefix = ''''''
    if is_panoptic:
        prefix = '''detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:2_56, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:2_56]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[2_56:5_12, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[2_56:5_12]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-2_56:, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-2_56:]
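def _demo_in_proj_split():
    # Not part of the original script: a minimal sketch of the slicing above, assuming
    # DETR's hidden size of 256. PyTorch's MultiheadAttention keeps q/k/v fused in a
    # single (3 * 256, 256) in_proj matrix; rows 0-255 form the query projection,
    # rows 256-511 the key projection, and the last 256 rows the value projection.
    import torch

    fused = torch.randn(768 , 256 )
    q, k, v = fused[:256, :], fused[256:512, :], fused[-256:, :]
    assert torch.equal(torch.cat([q, k, v] ) , fused )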
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_detr_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    config, is_panoptic = get_detr_config(model_name )
    # load original model from torch hub
    model_name_to_original_name = {
        '''detr-resnet-50''': '''detr_resnet50''',
        '''detr-resnet-101''': '''detr_resnet101''',
    }
    logger.info(F'''Converting model {model_name}...''' )
    detr = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = '''detr.''' + src
        rename_key(state_dict , src , dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion on an image
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    processor = DetrImageProcessor(format=format )
    encoding = processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding['''pixel_values''']
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub..." )
        model.push_to_hub(F'''nielsr/{model_name}''' )
        processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
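    # Example invocation (hypothetical script/output names, shown for illustration):
    #   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
    #       --pytorch_dump_folder_path ./detr-resnet-50-converted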
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
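# Minimal usage sketch (not part of the library file): copy() deep-copies every field,
# so mutating the copy leaves the original config untouched.
def _demo_download_config_copy():
    original = DownloadConfig(cache_dir="/tmp/datasets_cache" , max_retries=3 )
    duplicate = original.copy()
    duplicate.max_retries = 5
    assert original.max_retries == 3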
| 680 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = '''http://www.mocksite.com/file1.txt'''
CONTENT = '''"text": ["foo", "foo"]'''
HASH = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}
    def iter_content( self , **kwargs ):
        return [bytes(CONTENT , "utf-8" )]
def mock_request( *args , **kwargs ):
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def test_download_manager_download( urls_type , tmp_path , monkeypatch ):
    import requests
    monkeypatch.setattr(requests , "request" , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json" )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def test_download_manager_extract( paths_type , xz_file , text_file ):
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl( path , file ):
    assert path.endswith(".jsonl" )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode("utf-8" ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def test_iter_archive_path( archive_jsonl , request ):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def test_iter_archive_file( archive_nested_jsonl , request ):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files( data_dir_with_hidden_files ):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
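# For context (behavior assumed from datasets.utils.file_utils): hash_url_to_filename
# returns the sha256 hex digest of the URL (suffixed with the etag hash when one is
# given), which is why the download test above can check parts[-1] == HASH offline.
def _demo_hash_url_to_filename():
    assert hash_url_to_filename(URL , etag=None ) == HASH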
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_text2img( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
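def _demo_seeded_determinism():
    # Not part of the original tests: the save/load check above relies on reseeding.
    # After torch.manual_seed(0) is called again, the generator replays the same
    # draws, so two pipeline runs differ only by numerical noise.
    generator = torch.manual_seed(0 )
    first = torch.randn(4 , generator=generator )
    generator = torch.manual_seed(0 )
    second = torch.randn(4 , generator=generator )
    assert torch.equal(first , second )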
| 680 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    config_parameters_to_change = {
        'image_size': 'sample_size',
        'num_res_blocks': 'layers_per_block',
        'block_channels': 'block_out_channels',
        'down_blocks': 'down_block_types',
        'up_blocks': 'up_block_types',
        'downscale_freq_shift': 'freq_shift',
        'resnet_num_groups': 'norm_num_groups',
        'resnet_act_fn': 'act_fn',
        'resnet_eps': 'norm_eps',
        'num_head_channels': 'attention_head_dim',
    }
    key_parameters_to_change = {
        'time_steps': 'time_proj',
        'mid': 'mid_block',
        'downsample_blocks': 'down_blocks',
        'upsample_blocks': 'up_blocks',
    }
    subfolder = '' if has_file(args.repo_path, '''config.json''') else 'unet'
    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, '''config.json'''):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config['down_block_types'] = [k.replace('''UNetRes''', '''''') for k in config['down_block_types']]
        config['up_block_types'] = [k.replace('''UNetRes''', '''''') for k in config['up_block_types']]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('''.''')[0] == key:
                    new_state_dict['''.'''.join([new_key] + param_key.split('''.''')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
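        # Worked example of the renaming pass above (hypothetical key): the first dotted
        # component of "downsample_blocks.0.conv.weight" matches "downsample_blocks", so
        # the entry is stored as "down_blocks.0.conv.weight"; keys whose first component
        # matches nothing in key_parameters_to_change are copied into new_state_dict as-is.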
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            output_model_file = os.path.join(output_dir , weights_name )
            if accelerator.process_index == 0:
                logger.info(F'''Saving model to {output_model_file}''' )
                torch.save(state_dict , output_model_file )
                logger.info(F'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(F'''Saving model to {output_model_file}''' )
            torch.save(state_dict , output_model_file )
            logger.info(F'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , F'''{MODEL_NAME}_{model_index}''' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(F'''Saving model to {ckpt_dir}''' )
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(F'''Model saved to {ckpt_dir}''' )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object" )
                return
            weights_name = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(F'''Loading model from {input_model_file}''' )
            state_dict = torch.load(input_model_file )
            logger.info(F'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(F'''Loading model from {input_model_file}''' )
            state_dict = torch.load(input_model_file )
            logger.info(F'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , F'''{MODEL_NAME}_{model_index}''' )
                if F'''{MODEL_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading model from {ckpt_dir}''' )
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict["model"]
            logger.info(F'''Model loaded from {ckpt_dir}''' )
        model.load_state_dict(state_dict )
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name )
                logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
                torch.save(optim_state , output_optimizer_file )
                logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
        else:
            ckpt_dir = os.path.join(output_dir , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def load_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
            optim_state = torch.load(input_optimizer_file )
            logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
        else:
            ckpt_dir = (
                os.path.join(input_dir , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
                if F'''{OPTIMIZER_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state["optimizer"]
            logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
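# Usage sketch (assumed wiring; in Accelerate these helpers are normally driven by
# Accelerator.save_state / Accelerator.load_state rather than called directly):
#
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#
# Per the branches above: FULL_STATE_DICT writes one consolidated "{MODEL_NAME}.bin"
# from rank 0, LOCAL_STATE_DICT writes one "{MODEL_NAME}_rank{i}.bin" per process,
# and SHARDED_STATE_DICT writes a "{MODEL_NAME}_{index}" directory of shards.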
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = ["ConvNextFeatureExtractor"]
a : List[str] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 703 |
'''simple docstring'''
def fizz_buzz( number : int , iterations : int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be\nan integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
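# Example: fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (every emitted token, numbers included, is followed by a single space).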
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate( item : str , main_target : str ) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover( parent_1 : str , parent_2 : str ) -> tuple[str, str]:
    random_slice = random.randint(0 , len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate( child : str , genes : list[str] ) -> str:
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select( parent_1 : tuple[str, float] , population_score : list[tuple[str, float]] , genes : list[str] , ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 1_00 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
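# Concrete trace of the operators above (with a hypothetical fixed slice index of 3):
# crossover("ABCDEF", "uvwxyz") would yield ("ABC" + "xyz", "uvw" + "DEF"), i.e.
# ("ABCxyz", "uvwDEF"); mutate then rewrites at most one position of a child with a
# random gene, with probability MUTATION_PROBABILITY (0.4).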
def basic( target : str , genes : list[str] , debug : bool = True ) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #    max_workers=NUM_WORKERS) as executor:
        #    futures = {executor.submit(evaluate, item) for item in population}
        #    concurrent.futures.wait(futures)
        #    population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F'''\nGeneration: {generation}'''
                F'''\nTotal Population:{total_population}'''
                F'''\nBest score: {population_score[0][1]}'''
                F'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
    genes_list = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    generation , population , target = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 704 |
'''simple docstring'''
def twos_complement( number : int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |