| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 – 54.5k chars) | int64 (0 – 371) | string (87 – 49.2k chars) | int64 (0 – 349) | int64 (0 – 1) |
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    # The decoder bias is tied to the MLM head; position ids start at 2 (padding offset).
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
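    # Example invocation (illustrative only; the paths are hypothetical):
    #   python convert_yoso_checkpoint.py \
    #       --pytorch_model_path /path/to/yoso.ckpt \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/output_dir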
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the maximum of nums[left]..nums[right] by divide and conquer.

    >>> find_max([1, 5, 3], 0, 2)
    5
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
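# Worked example (illustration): find_max([2, 8, 5], 0, 2) recurses into
# find_max([2, 8, 5], 0, 1) -> 8 and find_max([2, 8, 5], 2, 2) -> 5, so it returns 8.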
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
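# A condensed sketch of the tracking API exercised below (illustrative only,
# never called by this script; the project name and logged values are placeholders):
def _tracking_api_sketch():
    accelerator = Accelerator(log_with="all", project_dir="logs")
    accelerator.init_trackers("my_project", config={"lr": 2e-5})
    accelerator.log({"train_loss": 0.5}, step=0)
    accelerator.end_training()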
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval `DataLoader`s for GLUE MRPC using the bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
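# Note (illustration): with pad_to_multiple_of=8, a batch whose longest sequence
# has 27 tokens is padded up to 32, keeping shapes aligned for tensor cores.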
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ : int = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCAmelCase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_checks():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
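# Minimal usage sketch (not part of the original module): `attribute_map` above lets
# generic code read `hidden_size` while the stored field is `d_model`.
if __name__ == "__main__":
    config = TrOCRConfig()
    assert config.hidden_size == config.d_model == 1024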
def stooge_sort(arr):
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
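# Complexity note: stooge sort makes T(n) = 3*T(2n/3) + O(1) comparisons,
# i.e. O(n^(log 3 / log 1.5)) which is roughly O(n^2.71); educational, not practical.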
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested casing/accent options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
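# Minimal usage sketch (not in the original module; downloads a checkpoint):
if __name__ == "__main__":
    tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    print(tokenizer("Hello world")["input_ids"])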
def lowercase( UpperCamelCase_ , UpperCamelCase_ = " " ) -> list:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = 0
for index, char in enumerate(UpperCamelCase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCamelCase = index + 1
elif index + 1 == len(UpperCamelCase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
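# Behaviour note: unlike str.split, a trailing separator is dropped here:
# split("a,b,", ",") returns ['a', 'b'] whereas "a,b,".split(",") returns ['a', 'b', ''].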
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = embeddings_size
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = scope
UpperCamelCase = len(lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = TFResNetModel(config=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__lowerCAmelCase = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = TFResNetModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ):
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase = layer_type
UpperCamelCase = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> Any:
'''simple docstring'''
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" )
# forward pass
UpperCamelCase = model(**lowerCamelCase_ )
# verify the logits
UpperCamelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : List[str] = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Union[str, Any] = """altclip_text_model"""
def __init__( self : str , lowercase_ : Union[str, Any]=250002 , lowercase_ : int=1024 , lowercase_ : List[str]=24 , lowercase_ : int=16 , lowercase_ : List[str]=4096 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Dict=0.1 , lowercase_ : int=0.1 , lowercase_ : Tuple=514 , lowercase_ : Any=1 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=0.02 , lowercase_ : Any=1E-05 , lowercase_ : Any=1 , lowercase_ : Union[str, Any]=0 , lowercase_ : Dict=2 , lowercase_ : List[Any]="absolute" , lowercase_ : Dict=True , lowercase_ : int=768 , **lowercase_ : Any , ):
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
snake_case_ : Dict = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Union[str, Any] = attention_probs_dropout_prob
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : Dict = initializer_factor
snake_case_ : Optional[Any] = layer_norm_eps
snake_case_ : Dict = position_embedding_type
snake_case_ : str = use_cache
snake_case_ : int = project_dim
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Union[str, Any] = """altclip_vision_model"""
def __init__( self : Any , lowercase_ : str=768 , lowercase_ : Optional[int]=3072 , lowercase_ : int=512 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : List[str]=3 , lowercase_ : str=224 , lowercase_ : Optional[int]=32 , lowercase_ : Tuple="quick_gelu" , lowercase_ : Tuple=1E-5 , lowercase_ : List[Any]=0.0 , lowercase_ : Any=0.02 , lowercase_ : Any=1.0 , **lowercase_ : Optional[int] , ):
super().__init__(**lowercase_ )
snake_case_ : Optional[int] = hidden_size
snake_case_ : str = intermediate_size
snake_case_ : List[Any] = projection_dim
snake_case_ : Tuple = num_hidden_layers
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : Union[str, Any] = num_channels
snake_case_ : Any = patch_size
snake_case_ : List[Any] = image_size
snake_case_ : List[Any] = initializer_range
snake_case_ : str = initializer_factor
snake_case_ : Optional[Any] = attention_dropout
snake_case_ : int = layer_norm_eps
snake_case_ : Optional[Any] = hidden_act
@classmethod
def _snake_case ( cls : str , lowercase_ : List[Any] , **lowercase_ : Union[str, Any] ):
cls._set_token_in_kwargs(lowercase_ )
snake_case_ : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
snake_case_ : int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase_ , **lowercase_ )
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : str = """altclip"""
_lowerCAmelCase : Union[str, Any] = True
def __init__( self : Dict , lowercase_ : int=None , lowercase_ : Any=None , lowercase_ : Optional[int]=768 , lowercase_ : List[Any]=2.65_92 , **lowercase_ : Union[str, Any] ):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
snake_case_ : List[Any] = kwargs.pop('''text_config_dict''' , lowercase_ )
snake_case_ : Optional[int] = kwargs.pop('''vision_config_dict''' , lowercase_ )
super().__init__(**lowercase_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
snake_case_ : Any = {}
# This is the complete result when using `text_config_dict`.
snake_case_ : Dict = AltCLIPTextConfig(**lowercase_ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
snake_case_ : List[Any] = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
snake_case_ : Optional[Any] = (
f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
f"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(lowercase_ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
snake_case_ : Union[str, Any] = {}
# This is the complete result when using `vision_config_dict`.
snake_case_ : Any = AltCLIPVisionConfig(**lowercase_ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
snake_case_ : Optional[int] = {
str(lowercase_ ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
snake_case_ : List[str] = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
snake_case_ : List[Any] = (
f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
f"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(lowercase_ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
snake_case_ : Union[str, Any] = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
snake_case_ : Union[str, Any] = {}
logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
snake_case_ : Any = AltCLIPTextConfig(**lowercase_ )
snake_case_ : Any = AltCLIPVisionConfig(**lowercase_ )
snake_case_ : str = projection_dim
snake_case_ : str = logit_scale_init_value
snake_case_ : Tuple = 1.0
@classmethod
def _snake_case ( cls : str , lowercase_ : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def _snake_case ( self : Dict ):
snake_case_ : Optional[int] = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.text_config.to_dict()
snake_case_ : Union[str, Any] = self.vision_config.to_dict()
snake_case_ : Optional[int] = self.__class__.model_type
return output
"""simple docstring"""
import os
def __lowercase ( _a ):
snake_case_ : Tuple = len(grid[0] )
snake_case_ : Optional[int] = len(_a )
snake_case_ : Union[str, Any] = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : List[Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_a ):
for j in range(n_rows - 3 ):
snake_case_ : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
snake_case_ : int = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
snake_case_ : Dict = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
snake_case_ : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
snake_case_ : List[str] = max(
_a , _a , _a , _a )
if max_product > largest:
snake_case_ : str = max_product
return largest
def __lowercase ( ):
snake_case_ : Tuple = []
with open(os.path.dirname(_a ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
snake_case_ : List[str] = [[int(_a ) for i in grid[j]] for j in range(len(_a ) )]
return largest_product(_a )
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosConfig , snake_case__ : bool = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: config.hidden_size, :]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[-config.hidden_size :, :]
_snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    # Renames one key of the original YOLOS checkpoint to its transformers equivalent.
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors.
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : bool = False ):
"""simple docstring"""
_snake_case : Optional[Any] = get_yolos_config(snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )["""model"""]
# load 🤗 model
_snake_case : Optional[Any] = YolosForObjectDetection(snake_case__ )
model.eval()
_snake_case : Optional[Any] = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by YolosImageProcessor
_snake_case : List[str] = 8_00 if yolos_name != """yolos_ti""" else 5_12
_snake_case : Optional[int] = YolosImageProcessor(format="""coco_detection""" , size=snake_case__ )
_snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
_snake_case , _snake_case : Optional[int] = outputs.logits, outputs.pred_boxes
_snake_case , _snake_case : Dict = None, None
if yolos_name == "yolos_ti":
_snake_case : Optional[Any] = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
_snake_case : Tuple = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
_snake_case : List[str] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
_snake_case : List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
_snake_case : Dict = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
_snake_case : Union[str, Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
_snake_case : Optional[Any] = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
_snake_case : int = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
_snake_case : Optional[int] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
_snake_case : Dict = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
_snake_case : str = model_mapping[yolos_name]
image_processor.push_to_hub(snake_case__ , organization="""hustvl""" )
model.push_to_hub(snake_case__ , organization="""hustvl""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Any=7 ):
"""simple docstring"""
_snake_case : Any = None
if token is not None:
_snake_case : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
# The id of a workflow (not of a workflow run)
_snake_case : List[str] = """636036"""
_snake_case : Union[str, Any] = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
_snake_case : str = requests.get(snake_case__ , headers=snake_case__ ).json()
return result["workflow_runs"]
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : str = get_daily_ci_runs(snake_case__ )
_snake_case : str = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_snake_case : List[str] = workflow_run["""id"""]
break
return workflow_run_id
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Optional[Any] = get_last_daily_ci_runs(snake_case__ )
if workflow_run_id is not None:
_snake_case : Optional[Any] = get_artifacts_links(worflow_run_id=snake_case__ , token=snake_case__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_snake_case : Optional[int] = artifacts_links[artifact_name]
download_artifact(
artifact_name=snake_case__ , artifact_url=snake_case__ , output_dir=snake_case__ , token=snake_case__ )
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int ):
"""simple docstring"""
get_last_daily_ci_artifacts(snake_case__ , snake_case__ , snake_case__ )
_snake_case : int = {}
for artifact_name in artifact_names:
_snake_case : int = os.path.join(snake_case__ , F"{artifact_name}.zip" )
if os.path.isfile(snake_case__ ):
_snake_case : Tuple = {}
with zipfile.ZipFile(snake_case__ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case__ ):
# read the file
with z.open(snake_case__ ) as f:
_snake_case : Any = f.read().decode("""UTF-8""" )
return results
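# Illustrative usage (hypothetical artifact name; requires a GitHub token):
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="ci_artifacts", token=os.environ["GITHUB_TOKEN"]
#   )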
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 184 |
'''simple docstring'''
import os
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = len(grid[0] )
lowerCAmelCase__ : int = len(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Optional[Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(UpperCamelCase ):
for j in range(n_rows - 3 ):
lowerCAmelCase__ : str = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowerCAmelCase__ : Optional[int] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowerCAmelCase__ : Optional[int] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
lowerCAmelCase__ : Tuple = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowerCAmelCase__ : Dict = max(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if max_product > largest:
lowerCAmelCase__ : Any = max_product
return largest
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = []
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
    lowerCAmelCase__ : Dict = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
return largest_product(UpperCamelCase )
if __name__ == "__main__":
print(solution())
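# Illustrative sketch (not part of the original snippet): a de-obfuscated
# version of the scan above. It checks every run of four cells to the right,
# down, down-right and down-left of an n x n grid; the demo grid is made up.
def greatest_product(grid, run=4):
    n = len(grid)
    best = 0
    for r in range(n):
        for c in range(n):
            for dr, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):
                end_r, end_c = r + (run - 1) * dr, c + (run - 1) * dc
                if 0 <= end_r < n and 0 <= end_c < n:
                    product = 1
                    for k in range(run):
                        product *= grid[r + k * dr][c + k * dc]
                    best = max(best, product)
    return best
# The column of 9s wins here: 9**4 == 6561.
demo_grid = [[1, 2, 3, 9], [4, 5, 6, 9], [7, 8, 1, 9], [2, 3, 4, 9]]
assert greatest_product(demo_grid) == 9 ** 4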
| 184 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[] ) -> Any:
'''simple docstring'''
__UpperCAmelCase = size[0] - overlap_pixels * 2
__UpperCAmelCase = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__UpperCAmelCase = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_5_5
__UpperCAmelCase = np.pad(SCREAMING_SNAKE_CASE , mode='''linear_ramp''' , pad_width=SCREAMING_SNAKE_CASE , end_values=0 )
if "l" in remove_borders:
__UpperCAmelCase = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__UpperCAmelCase = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__UpperCAmelCase = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__UpperCAmelCase = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
return max(SCREAMING_SNAKE_CASE , min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__UpperCAmelCase = list(SCREAMING_SNAKE_CASE )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__UpperCAmelCase = clamp_rect(SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] )
return rect
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE , (original_slice, 0) )
return result
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__UpperCAmelCase = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__UpperCAmelCase = tile.crop(SCREAMING_SNAKE_CASE )
return tile
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = n % d
return n - divisor
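# Illustrative sketch (not part of the original snippet): the helpers above
# grow a tile rectangle by `overlap` pixels on each side and clamp it back
# into the image. The same logic standalone, with made-up numbers:
def grow_and_clamp(rect, overlap, image_size):
    x0, y0, x1, y1 = (rect[0] - overlap, rect[1] - overlap,
                      rect[2] + overlap, rect[3] + overlap)
    clip = lambda v, lo, hi: max(lo, min(v, hi))
    return (clip(x0, 0, image_size[0]), clip(y0, 0, image_size[1]),
            clip(x1, 0, image_size[0]), clip(y1, 0, image_size[1]))
# A 128x128 tile in the image corner can only grow inward:
assert grow_and_clamp((0, 0, 128, 128), 32, (512, 512)) == (0, 0, 160, 160)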
class A_ ( snake_case_ ):
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 350 , ) -> int:
super().__init__(
vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , unet=__snake_case , low_res_scheduler=__snake_case , scheduler=__snake_case , max_noise_level=__snake_case , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ) -> int:
torch.manual_seed(0 )
__UpperCAmelCase = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__UpperCAmelCase = add_overlap_rect(__snake_case , __snake_case , image.size )
__UpperCAmelCase = image.crop(__snake_case )
__UpperCAmelCase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__UpperCAmelCase = translated_slice_x - (original_image_slice / 2)
__UpperCAmelCase = max(0 , __snake_case )
__UpperCAmelCase = squeeze_tile(__snake_case , __snake_case , __snake_case , __snake_case )
__UpperCAmelCase = to_input.size
__UpperCAmelCase = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__UpperCAmelCase = super(__snake_case , self ).__call__(image=__snake_case , **__snake_case ).images[0]
__UpperCAmelCase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__UpperCAmelCase = unsqueeze_tile(__snake_case , __snake_case )
__UpperCAmelCase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__UpperCAmelCase = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
__UpperCAmelCase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__snake_case ) , mode='''L''' , )
final_image.paste(
__snake_case , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __snake_case )
@torch.no_grad()
def __call__(self , lowercase__ , lowercase__ , lowercase__ = 75 , lowercase__ = 9.0 , lowercase__ = 50 , lowercase__ = None , lowercase__ = 1 , lowercase__ = 0.0 , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = 1 , lowercase__ = 128 , lowercase__ = 32 , lowercase__ = 32 , ) -> str:
__UpperCAmelCase = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
__UpperCAmelCase = math.ceil(image.size[0] / tile_size )
__UpperCAmelCase = math.ceil(image.size[1] / tile_size )
__UpperCAmelCase = tcx * tcy
__UpperCAmelCase = 0
for y in range(__snake_case ):
for x in range(__snake_case ):
self._process_tile(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , prompt=__snake_case , num_inference_steps=__snake_case , guidance_scale=__snake_case , noise_level=__snake_case , negative_prompt=__snake_case , num_images_per_prompt=__snake_case , eta=__snake_case , generator=__snake_case , latents=__snake_case , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def __a ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
__UpperCAmelCase = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE , revision='''fp16''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to('''cuda''' )
__UpperCAmelCase = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(SCREAMING_SNAKE_CASE ):
        print(f"progress: {obj['progress']:.4f}" )
obj["image"].save('''diffusers_library_progress.jpg''' )
__UpperCAmelCase = pipe(image=SCREAMING_SNAKE_CASE , prompt='''Black font, white background, vector''' , noise_level=4_0 , callback=SCREAMING_SNAKE_CASE )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
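# Illustrative sketch (not part of the original snippet): the seam blending
# above relies on np.pad(..., mode="linear_ramp") to feather tile borders.
# The alpha ramp on a 1-D strip with made-up sizes:
import numpy as np
core = np.ones(4, dtype=np.uint8) * 255  # fully opaque tile interior
mask = np.pad(core, pad_width=2, mode="linear_ramp", end_values=0)
print(mask)  # [  0 127 255 255 255 255 127   0]: alpha falls to 0 at the seam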
| 333 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A__ : List[str] =logging.get_logger(__name__)
A__ : Any ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : Any ={
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
A__ : Optional[int] ={
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
A__ : Optional[int] ={
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: Optional[Any] = VOCAB_FILES_NAMES
_lowercase: Tuple = PRETRAINED_VOCAB_FILES_MAP
_lowercase: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase: str = PRETRAINED_INIT_CONFIGURATION
_lowercase: List[Any] = RoFormerTokenizer
def __init__( self : Dict , __snake_case : str=None , __snake_case : Tuple=None , __snake_case : List[Any]=True , __snake_case : str="[UNK]" , __snake_case : Tuple="[SEP]" , __snake_case : str="[PAD]" , __snake_case : str="[CLS]" , __snake_case : Any="[MASK]" , __snake_case : Dict=True , __snake_case : str=None , **__snake_case : Optional[Any] , ) -> Union[str, Any]:
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , tokenize_chinese_chars=__snake_case , strip_accents=__snake_case , **__snake_case , )
_lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , __snake_case ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , __snake_case ) != strip_accents
):
_lowerCAmelCase = getattr(__snake_case , pre_tok_state.pop("""type""" ) )
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = strip_accents
_lowerCAmelCase = pre_tok_class(**__snake_case )
_lowerCAmelCase = do_lower_case
def __getstate__( self : int ) -> Optional[int]:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = BertPreTokenizer()
return state
def __setstate__( self : Tuple , __snake_case : Tuple ) -> List[str]:
_lowerCAmelCase = d
_lowerCAmelCase = self.__dict__["""_tokenizer"""].get_vocab()
_lowerCAmelCase = PreTokenizer.custom(JiebaPreTokenizer(__snake_case ) )
def lowercase__ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[int]=None ) -> Optional[Any]:
_lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : int , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
_lowerCAmelCase = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def lowercase__ ( self : Dict , __snake_case : Dict , __snake_case : int=None , __snake_case : List[Any]=None , __snake_case : List[Any]=False , **__snake_case : Dict , ) -> str:
_lowerCAmelCase = BertPreTokenizer()
return super().save_pretrained(__snake_case , __snake_case , __snake_case , __snake_case , **__snake_case )
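# Illustrative sketch (not part of the original snippet): the __getstate__ /
# __setstate__ pair above exists because a PreTokenizer.custom(...) wrapper is
# not picklable, so a plain BertPreTokenizer is swapped in before pickling and
# the Jieba pre-tokenizer is rebuilt afterwards. The same pattern in
# miniature, with made-up names:
import pickle
class HasUnpicklablePart:
    def __init__(self):
        self.custom = lambda s: s.split()  # stand-in for PreTokenizer.custom(...)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["custom"] = None             # replace with something picklable
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        self.custom = lambda s: s.split()  # rebuild the custom piece
restored = pickle.loads(pickle.dumps(HasUnpicklablePart()))
assert restored.custom("a b") == ["a", "b"]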
| 70 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
_UpperCAmelCase : Dict = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_UpperCAmelCase : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowercase :
__lowercase : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
__lowercase : Optional[str] = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "A folder containing the training data."} )
__lowercase : Optional[str] = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "A folder containing the validation data."} )
__lowercase : Optional[float] = field(
default=0.1_5 , metadata={"help": "Percent to split off of train for validation."} )
__lowercase : int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
__lowercase : float = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
__lowercase : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__lowercase : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = {}
if self.train_dir is not None:
UpperCamelCase = self.train_dir
if self.validation_dir is not None:
UpperCamelCase = self.validation_dir
UpperCamelCase = data_files if data_files else None
@dataclass
class lowercase :
__lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_SCREAMING_SNAKE_CASE )} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
__lowercase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__lowercase : str = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Name or path of preprocessor config."} )
__lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
__lowercase : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
__lowercase : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
__lowercase : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Stride to use for the encoder."} , )
class lowercase :
def __init__( self , A_=192 , A_=32 , A_=4 , A_=0.6 ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = input_size
UpperCamelCase = mask_patch_size
UpperCamelCase = model_patch_size
UpperCamelCase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
UpperCamelCase = self.input_size // self.mask_patch_size
UpperCamelCase = self.mask_patch_size // self.model_patch_size
UpperCamelCase = self.rand_size**2
UpperCamelCase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ) -> str:
"""simple docstring"""
UpperCamelCase = np.random.permutation(self.token_count )[: self.mask_count]
UpperCamelCase = np.zeros(self.token_count , dtype=A_ )
UpperCamelCase = 1
UpperCamelCase = mask.reshape((self.rand_size, self.rand_size) )
UpperCamelCase = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
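# Illustrative sketch (not part of the original script): what the mask
# generator above produces with the SimMIM defaults used in this file
# (input 192, mask patches of 32, model patches of 4, mask ratio 0.6):
import numpy as np
rng = np.random.default_rng(0)
rand_size = 192 // 32                          # 6x6 grid of maskable patches
scale = 32 // 4                                # each covers 8x8 model patches
token_count = rand_size ** 2                   # 36
mask_count = int(np.ceil(token_count * 0.6))   # 22 patches masked
mask = np.zeros(token_count, dtype=int)
mask[rng.permutation(token_count)[:mask_count]] = 1
mask = mask.reshape(rand_size, rand_size).repeat(scale, axis=0).repeat(scale, axis=1)
print(mask.shape, mask.mean())                 # (48, 48) with ~0.61 of tokens masked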
def A ( lowercase ) -> Tuple:
'''simple docstring'''
UpperCamelCase = torch.stack([example['pixel_values'] for example in examples] )
UpperCamelCase = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , lowercase , lowercase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase ) and data_args.train_val_split > 0.0:
UpperCamelCase = ds['train'].train_test_split(data_args.train_val_split )
UpperCamelCase = split['train']
UpperCamelCase = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
UpperCamelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowercase )
elif model_args.model_name_or_path:
UpperCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
UpperCamelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowercase , 'decoder_type' ):
UpperCamelCase = 'simmim'
# adapt config
UpperCamelCase = model_args.image_size if model_args.image_size is not None else config.image_size
UpperCamelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
UpperCamelCase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase )
elif model_args.model_name_or_path:
UpperCamelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
UpperCamelCase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
UpperCamelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
UpperCamelCase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
UpperCamelCase = AutoModelForMaskedImageModeling.from_config(lowercase )
if training_args.do_train:
UpperCamelCase = ds['train'].column_names
else:
UpperCamelCase = ds['validation'].column_names
if data_args.image_column_name is not None:
UpperCamelCase = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase = 'image'
elif "img" in column_names:
UpperCamelCase = 'img'
else:
UpperCamelCase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
UpperCamelCase = Compose(
[
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.6_7, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
UpperCamelCase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(lowercase ):
        UpperCamelCase = [transforms(image ) for image in examples[image_column_name]]
UpperCamelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
UpperCamelCase = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
UpperCamelCase = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase )
# Initialize our trainer
UpperCamelCase = Trainer(
model=lowercase , args=lowercase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase = last_checkpoint
UpperCamelCase = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase = trainer.evaluate()
trainer.log_metrics('eval' , lowercase )
trainer.save_metrics('eval' , lowercase )
# Write model card and (optionally) push to hub
UpperCamelCase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
if __name__ == "__main__":
main()
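# Illustrative usage (not part of the original script): HfArgumentParser turns
# every dataclass field above into a CLI flag, so an invocation looks roughly
# like the following. Paths and the model type are placeholders.
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --image_size 192 --patch_size 32 --encoder_stride 32 \
#       --mask_patch_size 32 --mask_ratio 0.6 \
#       --do_train --do_eval \
#       --output_dir ./simmim-out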
| 110 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : torch.FloatTensor
class lowercase ( nn.Module ):
def __init__( self , A_=3 , A_=3 , A_=("DownEncoderBlock2D",) , A_=(64,) , A_=2 , A_=32 , A_="silu" , A_=True , ) -> List[Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase = layers_per_block
UpperCamelCase = torch.nn.Convad(
A_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase = None
UpperCamelCase = nn.ModuleList([] )
# down
UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
UpperCamelCase = output_channel
UpperCamelCase = block_out_channels[i]
UpperCamelCase = i == len(A_ ) - 1
UpperCamelCase = get_down_block(
A_ , num_layers=self.layers_per_block , in_channels=A_ , out_channels=A_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=A_ , resnet_groups=A_ , attention_head_dim=A_ , temb_channels=A_ , )
self.down_blocks.append(A_ )
# mid
UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=A_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=A_ , temb_channels=A_ , )
# out
UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=A_ , eps=1e-6 )
UpperCamelCase = nn.SiLU()
UpperCamelCase = 2 * out_channels if double_z else out_channels
UpperCamelCase = nn.Convad(block_out_channels[-1] , A_ , 3 , padding=1 )
UpperCamelCase = False
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = x
UpperCamelCase = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ ):
def custom_forward(*A_ ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) , A_ , use_reentrant=A_ )
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A_ , use_reentrant=A_ )
else:
for down_block in self.down_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) , A_ )
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , A_ )
else:
# down
for down_block in self.down_blocks:
UpperCamelCase = down_block(A_ )
# middle
UpperCamelCase = self.mid_block(A_ )
# post-process
UpperCamelCase = self.conv_norm_out(A_ )
UpperCamelCase = self.conv_act(A_ )
UpperCamelCase = self.conv_out(A_ )
return sample
class lowercase ( nn.Module ):
def __init__( self , A_=3 , A_=3 , A_=("UpDecoderBlock2D",) , A_=(64,) , A_=2 , A_=32 , A_="silu" , A_="group" , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase = layers_per_block
UpperCamelCase = nn.Convad(
A_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase = None
UpperCamelCase = nn.ModuleList([] )
UpperCamelCase = in_channels if norm_type == 'spatial' else None
# mid
UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=A_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=A_ , temb_channels=A_ , )
# up
UpperCamelCase = list(reversed(A_ ) )
UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
UpperCamelCase = output_channel
UpperCamelCase = reversed_block_out_channels[i]
UpperCamelCase = i == len(A_ ) - 1
UpperCamelCase = get_up_block(
A_ , num_layers=self.layers_per_block + 1 , in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=A_ , resnet_groups=A_ , attention_head_dim=A_ , temb_channels=A_ , resnet_time_scale_shift=A_ , )
self.up_blocks.append(A_ )
UpperCamelCase = output_channel
# out
if norm_type == "spatial":
UpperCamelCase = SpatialNorm(block_out_channels[0] , A_ )
else:
UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=A_ , eps=1e-6 )
UpperCamelCase = nn.SiLU()
UpperCamelCase = nn.Convad(block_out_channels[0] , A_ , 3 , padding=1 )
UpperCamelCase = False
def __UpperCamelCase ( self , A_ , A_=None ) -> Dict:
"""simple docstring"""
UpperCamelCase = z
UpperCamelCase = self.conv_in(A_ )
UpperCamelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ ):
def custom_forward(*A_ ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A_ , A_ , use_reentrant=A_ )
UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) , A_ , A_ , use_reentrant=A_ )
else:
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , A_ , A_ )
UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) , A_ , A_ )
else:
# middle
UpperCamelCase = self.mid_block(A_ , A_ )
UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
UpperCamelCase = up_block(A_ , A_ )
# post-process
if latent_embeds is None:
UpperCamelCase = self.conv_norm_out(A_ )
else:
UpperCamelCase = self.conv_norm_out(A_ , A_ )
UpperCamelCase = self.conv_act(A_ )
UpperCamelCase = self.conv_out(A_ )
return sample
class lowercase ( nn.Module ):
def __init__( self , A_ , A_ , A_ , A_=None , A_="random" , A_=False , A_=True ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase = n_e
UpperCamelCase = vq_embed_dim
UpperCamelCase = beta
UpperCamelCase = legacy
UpperCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCamelCase = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
UpperCamelCase = self.used.shape[0]
UpperCamelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCamelCase = self.re_embed
UpperCamelCase = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
UpperCamelCase = n_e
UpperCamelCase = sane_index_shape
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = inds.shape
assert len(A_ ) > 1
UpperCamelCase = inds.reshape(ishape[0] , -1 )
UpperCamelCase = self.used.to(A_ )
UpperCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
UpperCamelCase = match.argmax(-1 )
UpperCamelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCamelCase = self.unknown_index
return new.reshape(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = inds.shape
assert len(A_ ) > 1
UpperCamelCase = inds.reshape(ishape[0] , -1 )
UpperCamelCase = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
UpperCamelCase = 0 # simply set to zero
UpperCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , A_ )
return back.reshape(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
# reshape z -> (batch, height, width, channel) and flatten
UpperCamelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCamelCase = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCamelCase = torch.argmin(torch.cdist(A_ , self.embedding.weight ) , dim=1 )
UpperCamelCase = self.embedding(A_ ).view(z.shape )
UpperCamelCase = None
UpperCamelCase = None
# compute loss for embedding
if not self.legacy:
UpperCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCamelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCamelCase = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCamelCase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCamelCase = self.remap_to_used(A_ )
UpperCamelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCamelCase = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCamelCase = self.unmap_to_all(A_ )
UpperCamelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCamelCase = self.embedding(A_ )
if shape is not None:
UpperCamelCase = z_q.view(A_ )
# reshape back to match original input shape
UpperCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
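# Illustrative sketch (not part of the original module): the heart of the
# quantizer above is a nearest-codebook lookup plus the straight-through
# gradient trick. Standalone, with made-up sizes:
import torch
codebook = torch.nn.Embedding(16, 4)               # n_e=16 codes of dim 4
z = torch.randn(8, 4, requires_grad=True)          # 8 flattened latents
indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
z_q = codebook(indices)
z_q = z + (z_q - z).detach()  # forward uses the code; gradients flow to z
z_q.sum().backward()
assert torch.allclose(z.grad, torch.ones_like(z))  # identity gradient w.r.t. z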
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_=False ) -> Any:
"""simple docstring"""
UpperCamelCase = parameters
UpperCamelCase , UpperCamelCase = torch.chunk(A_ , 2 , dim=1 )
UpperCamelCase = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCamelCase = deterministic
UpperCamelCase = torch.exp(0.5 * self.logvar )
UpperCamelCase = torch.exp(self.logvar )
if self.deterministic:
UpperCamelCase = UpperCamelCase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , A_ = None ) -> torch.FloatTensor:
"""simple docstring"""
# make sure sample is on the same device as the parameters and has same dtype
UpperCamelCase = randn_tensor(
self.mean.shape , generator=A_ , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCamelCase = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , A_=None ) -> Tuple:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , A_ , A_=[1, 2, 3] ) -> Optional[Any]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
UpperCamelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.mean
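# Illustrative sketch (not part of the original module): kl() above is the
# closed-form KL(N(mu, var) || N(0, 1)) = 0.5 * sum(mu^2 + var - 1 - log var).
# A quick standalone check with made-up numbers:
import torch
mean, logvar = torch.tensor([0.5]), torch.tensor([-1.0])
kl = 0.5 * (mean.pow(2) + logvar.exp() - 1.0 - logvar)
print(kl)  # tensor([0.3089]) == 0.5 * (0.25 + 0.3679 - 1 + 1)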
| 110 | 1 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : int = """T5Config"""
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = jnp.zeros_like(lowerCamelCase)
__lowerCAmelCase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
__lowerCAmelCase = shifted_input_ids.at[:, 0].set(lowerCamelCase)
__lowerCAmelCase = jnp.where(shifted_input_ids == -1_0_0, lowerCamelCase, lowerCamelCase)
return shifted_input_ids
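# Illustrative sketch (not part of the original module): the shift above turns
# labels into decoder input ids -- prepend decoder_start_token_id, drop the
# last token, and replace the -100 loss mask with pad_token_id (made-up values):
import jax.numpy as jnp
labels = jnp.array([[5, 6, 7, -100]])
pad_token_id = decoder_start_token_id = 0
shifted = jnp.zeros_like(labels).at[:, 1:].set(labels[:, :-1])
shifted = shifted.at[:, 0].set(decoder_start_token_id)
shifted = jnp.where(shifted == -100, pad_token_id, shifted)
print(shifted)  # [[0 5 6 7]] -- inputs for teacher forcing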
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = 'mt5'
__UpperCamelCase : Union[str, Any] = MTaConfig
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : Dict = 'mt5'
__UpperCamelCase : Optional[Any] = MTaConfig
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = 'mt5'
__UpperCamelCase : List[Any] = MTaConfig
| 174 |
'''simple docstring'''
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert x is not None
assert y is not None
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = len(lowerCamelCase)
# declaring the array for storing the dp values
__lowerCAmelCase = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741
for i in range(1, m + 1):
for j in range(1, n + 1):
__lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
__lowerCAmelCase = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
__lowerCAmelCase = ''''''
__lowerCAmelCase , __lowerCAmelCase = m, n
while i > 0 and j > 0:
__lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__lowerCAmelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = """AGGTAB"""
_UpperCAmelCase : int = """GXTXAYB"""
_UpperCAmelCase : Any = 4
_UpperCAmelCase : List[Any] = """GTAB"""
    _UpperCAmelCase , _UpperCAmelCase = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
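# Illustrative sketch (not part of the original snippet): a de-obfuscated,
# runnable version of the DP above, verified on the same AGGTAB / GXTXAYB pair:
def lcs(x, y):
    m, n = len(x), len(y)
    table = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            table[i][j] = max(table[i - 1][j], table[i][j - 1], table[i - 1][j - 1] + match)
    seq, i, j = "", m, n  # backtrack to recover one subsequence
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1] and table[i][j] == table[i - 1][j - 1] + 1:
            seq, i, j = x[i - 1] + seq, i - 1, j - 1
        elif table[i][j] == table[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return table[m][n], seq
assert lcs("AGGTAB", "GXTXAYB") == (4, "GTAB")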
| 174 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A ( __snake_case , unittest.TestCase ):
__magic_name__ = BertTokenizer
__magic_name__ = BertTokenizerFast
__magic_name__ = True
__magic_name__ = True
__magic_name__ = filter_non_english
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
A : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = '''UNwant\u00E9d,running'''
A : Optional[Any] = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Any = self.tokenizer_class(self.vocab_file )
A : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [9, 6, 7, 12, 10, 11] )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A : List[str] = self.get_tokenizer()
A : str = self.get_rust_tokenizer()
A : List[str] = '''UNwant\u00E9d,running'''
A : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
A : Any = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : List[Any] = self.get_rust_tokenizer()
A : str = tokenizer.encode(SCREAMING_SNAKE_CASE )
A : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# With lower casing
A : List[str] = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
A : Dict = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = '''UNwant\u00E9d,running'''
A : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
A : Optional[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
A : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Dict = self.get_rust_tokenizer()
A : Optional[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE )
A : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : List[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[str] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = BasicTokenizer()
A : Optional[Any] = '''a\n\'ll !!to?\'d of, can\'t.'''
A : Dict = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A : List[str] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE ):
A : int = i
A : Any = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : int = self.get_tokenizer()
A : Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Union[str, Any] = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
A : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE )
A : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE )
A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE )
A : List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Dict = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
A : Optional[int] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , )
A : Optional[Any] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE , '''do_lower_case''' ) else False
A : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Union[str, Any] = ['''的''', '''人''', '''有''']
A : str = ''''''.join(SCREAMING_SNAKE_CASE )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[Any] = True
A : List[str] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Dict = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = tokenizer_p.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
A : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
A : Optional[int] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
A : Tuple = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Any = False
A : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Any = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
A : int = tokenizer_p.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
A : int = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
A : Tuple = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
# it is expected that only the first Chinese character is not preceded by "##".
A : List[str] = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE )
]
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
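# Illustrative sketch (not part of the original tests): the greedy
# longest-match-first rule that WordpieceTokenizer applies, reduced to a few
# standalone lines over the tiny vocab exercised above:
def wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:  # no substring matched: the whole word becomes [UNK]
            return [unk]
        start = end
    return pieces
tiny_vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece("unwanted", tiny_vocab) == ["un", "##want", "##ed"]
assert wordpiece("unwantedX", tiny_vocab) == ["[UNK]"]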
| 311 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( __snake_case ):
__magic_name__ = (UniPCMultistepScheduler,)
__magic_name__ = (('''num_inference_steps''', 25),)
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : List[Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
A, A : Tuple = sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = dict(self.forward_default_kwargs )
A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : List[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
A : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if scheduler is None:
    A : Dict = self.scheduler_classes[0]
    A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
    A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : int = 10
A : Tuple = self.dummy_model()
A : Any = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = dict(self.forward_default_kwargs )
A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
A : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
A : List[Any] = scheduler.timesteps[5]
A : Dict = scheduler.timesteps[6]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_b.shape )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
A : Dict = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : int = self.full_loop()
A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = 10
A : Union[str, Any] = self.dummy_model()
A : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.float16
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
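# A minimal sketch of the scheduler-interchange pattern exercised above:
# UniPC, DPM-Solver and DEIS share a config schema, so one scheduler can be
# rebuilt from another's config via `from_config`. Values are illustrative.
from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler

_base = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2)
_swapped = DPMSolverMultistepScheduler.from_config(_base.config)
_swapped.set_timesteps(25)  # the swapped scheduler exposes the same API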
| 311 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("""T""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
return (position - 1) // 2
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
return (2 * position) + 1
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
return (2 * position) + 2
class A__ ( Generic[T] ):
def __init__( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : list[tuple[T, int]] = []
lowerCAmelCase__ : dict[T, int] = {}
lowerCAmelCase__ : int = 0
def __len__( self : List[str] ):
'''simple docstring'''
return self.elements
def __repr__( self : str ):
'''simple docstring'''
return str(self.heap )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.elements == 0
def _lowerCamelCase ( self : List[Any] , a : T , a : int ):
'''simple docstring'''
self.heap.append((elem, weight) )
lowerCAmelCase__ : Optional[int] = self.elements
self.elements += 1
self._bubble_up(a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowerCAmelCase__ , lowerCAmelCase__ : str = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.heap[0]
self._bubble_down(a )
return elem
def _lowerCamelCase ( self : Optional[int] , a : T , a : int ):
'''simple docstring'''
lowerCAmelCase__ : int = self.position_map[elem]
lowerCAmelCase__ : Any = (elem, weight)
if position > 0:
lowerCAmelCase__ : Optional[int] = get_parent_position(a )
lowerCAmelCase__ , lowerCAmelCase__ : str = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(a )
else:
self._bubble_down(a )
else:
self._bubble_down(a )
def _lowerCamelCase ( self : Tuple , a : T ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.position_map[elem]
if curr_pos == 0:
return None
lowerCAmelCase__ : Optional[Any] = get_parent_position(a )
lowerCAmelCase__ , lowerCAmelCase__ : int = self.heap[curr_pos]
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(a , a )
return self._bubble_up(a )
return None
def _lowerCamelCase ( self : Optional[int] , a : T ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.position_map[elem]
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.heap[curr_pos]
lowerCAmelCase__ : List[Any] = get_child_left_position(a )
lowerCAmelCase__ : int = get_child_right_position(a )
if child_left_position < self.elements and child_right_position < self.elements:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.heap[child_left_position]
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(a , a )
return self._bubble_down(a )
if child_left_position < self.elements:
lowerCAmelCase__ , lowerCAmelCase__ : str = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(a , a )
return self._bubble_down(a )
else:
return None
if child_right_position < self.elements:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(a , a )
return self._bubble_down(a )
return None
def _lowerCamelCase ( self : Tuple , a : int , a : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.heap[nodea_pos][0]
lowerCAmelCase__ : str = self.heap[nodea_pos][0]
lowerCAmelCase__ , lowerCAmelCase__ : Any = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
lowerCAmelCase__ : Tuple = nodea_pos
lowerCAmelCase__ : List[str] = nodea_pos
class A__ ( Generic[T] ):
def __init__( self : str ):
'''simple docstring'''
lowerCAmelCase__ : dict[T, dict[T, int]] = {}
lowerCAmelCase__ : int = 0
def __repr__( self : List[str] ):
'''simple docstring'''
return str(self.connections )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.nodes
def _lowerCamelCase ( self : Optional[int] , a : T ):
'''simple docstring'''
if node not in self.connections:
lowerCAmelCase__ : Optional[Any] = {}
self.nodes += 1
def _lowerCamelCase ( self : Tuple , a : T , a : T , a : int ):
'''simple docstring'''
self.add_node(a )
self.add_node(a )
lowerCAmelCase__ : Optional[Any] = weight
lowerCAmelCase__ : Tuple = weight
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , ) -> tuple[dict[T, int], dict[T, T | None]]:
lowerCAmelCase__ : dict[T, int] = {node: maxsize for node in graph.connections}
lowerCAmelCase__ : dict[T, T | None] = {node: None for node in graph.connections}
lowerCAmelCase__ : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if priority_queue.is_empty():
return dist, parent
# initialization
lowerCAmelCase__ : Union[str, Any] = priority_queue.extract_min()
lowerCAmelCase__ : Optional[int] = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCAmelCase__ : List[Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE_ , dist[neighbour] )
lowerCAmelCase__ : Any = node
# running prim's algorithm
while not priority_queue.is_empty():
lowerCAmelCase__ : Tuple = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCAmelCase__ : Optional[Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE_ , dist[neighbour] )
lowerCAmelCase__ : List[str] = node
return dist, parent
| 212 |
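# The Prim implementation above is name-obfuscated, so the following is a
# self-contained sketch of the same technique (Prim's MST via a min-priority
# queue), using the standard-library heapq instead of the hand-rolled queue.
# All names are illustrative.
import heapq

def prim_mst(adj, start):
    """Return the MST parent map for a connected undirected weighted graph."""
    parent = {start: None}
    heap = [(0, start, None)]  # (edge weight, node, node's MST parent)
    visited = set()
    while heap:
        _w, node, par = heapq.heappop(heap)
        if node in visited:
            continue
        visited.add(node)
        parent[node] = par
        for neighbour, weight in adj[node].items():
            if neighbour not in visited:
                heapq.heappush(heap, (weight, neighbour, node))
    return parent

_demo = {"a": {"b": 3, "c": 15}, "b": {"a": 3, "c": 4}, "c": {"a": 15, "b": 4}}
print(prim_mst(_demo, "a"))  # {'a': None, 'b': 'a', 'c': 'b'}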
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class A__ ( __magic_name__ ):
lowercase = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowercase = Features({'audio': Audio()} )
lowercase = Features({'labels': ClassLabel} )
lowercase = "audio"
lowercase = "labels"
def _lowerCamelCase ( self : Dict , a : Tuple ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , a ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowerCAmelCase__ : Tuple = copy.deepcopy(self )
lowerCAmelCase__ : List[Any] = self.label_schema.copy()
lowerCAmelCase__ : List[Any] = features[self.label_column]
lowerCAmelCase__ : Optional[int] = label_schema
return task_template
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 212 | 1 |
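# A minimal sketch of how a task template like the one above was consumed by
# older versions of the `datasets` library (the tasks API has since been
# deprecated; column names are illustrative):
from datasets import Audio, ClassLabel, Features

_features = Features({"audio": Audio(), "labels": ClassLabel(names=["no", "yes"])})
# ds = ds.prepare_for_task("audio-classification")  # pre-deprecation datasets API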
"""simple docstring"""
import requests
SCREAMING_SNAKE_CASE = "" # <-- Put your OpenWeatherMap appid here!
SCREAMING_SNAKE_CASE = "https://api.openweathermap.org/data/2.5/"
def _SCREAMING_SNAKE_CASE ( lowercase_ = "Chicago" , lowercase_ = APPID ) -> dict:
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def _SCREAMING_SNAKE_CASE ( lowercase_ = "Kolkata, India" , lowercase_ = APPID ) -> dict:
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def _SCREAMING_SNAKE_CASE ( lowercase_ = 5_5.6_8 , lowercase_ = 1_2.5_7 , lowercase_ = APPID ) -> dict:
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
SCREAMING_SNAKE_CASE = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
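# The functions above rely on `params=locals()` to forward their keyword
# arguments straight into the query string. An explicit, equivalent form
# (function name and defaults are illustrative):
import requests

_URL_BASE = "https://api.openweathermap.org/data/2.5/"

def current_weather_explicit(q: str = "Chicago", appid: str = "") -> dict:
    return requests.get(_URL_BASE + "weather", params={"q": q, "appid": appid}).json()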
| 230 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = model.config
A__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
A__ = MBartConfig(
is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase_ , add_final_layer_norm=lowercase_ , )
return encoder_config, decoder_config
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
if "encoder.model" in name:
A__ = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
A__ = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
A__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
A__ = "encoder." + name
if "attn.proj" in name:
A__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
A__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
A__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
A__ = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
A__ = "encoder.layernorm.bias"
return name
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
A__ = key.split("." )
A__ = int(key_split[3] )
A__ = int(key_split[5] )
A__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
A__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=False ) -> Dict:
# load original model
A__ = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
A__, A__ = get_configs(lowercase_ )
A__ = DonutSwinModel(lowercase_ )
A__ = MBartForCausalLM(lowercase_ )
A__ = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
A__ = original_model.state_dict()
A__ = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
A__ = load_dataset("hf-internal-testing/example-documents" )
A__ = dataset["test"][0]["image"].convert("RGB" )
A__ = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
A__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
A__ = DonutProcessor(lowercase_ , lowercase_ )
A__ = processor(lowercase_ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
A__ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
A__ = "When is the coffee break?"
A__ = task_prompt.replace("{user_input}" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
A__ = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
A__ = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
A__ = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
A__ = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
A__ = "hello world"
else:
raise ValueError("Model name not supported" )
A__ = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="pt" )[
"input_ids"
]
A__ = original_model.encoder.model.patch_embed(lowercase_ )
A__, A__ = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
# verify encoder hidden states
A__ = original_model.encoder(lowercase_ )
A__ = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )
# verify decoder hidden states
A__ = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
A__ = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
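# Example invocation of the conversion script above (the script name matches
# the original transformers repository; the output path is illustrative):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa-hf \
#       --push_to_hub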
| 230 | 1 |
import datasets
from .evaluate import evaluate
UpperCAmelCase_ : Optional[Any] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
UpperCAmelCase_ : int = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
UpperCAmelCase_ : List[Any] = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
a_ : int = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
a_ : List[Any] = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
a_ : Any = evaluate(dataset=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ )
return score
| 32 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Union[str, Any] ):
"""simple docstring"""
stooge(snake_case__ , 0 , len(snake_case__ ) - 1 )
return arr
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
_snake_case , _snake_case : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_snake_case : Dict = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(snake_case__ , i + t , (snake_case__) )
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
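# The three overlapping two-thirds-size recursive calls above give the
# recurrence T(n) = 3 * T(2n/3) + O(1), i.e. roughly O(n^2.71). A quick check
# of the exponent:
import math

print(math.log(3) / math.log(1.5))  # ~2.7095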
| 64 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Tuple):
'''simple docstring'''
lowerCAmelCase__ = 0
def __snake_case ( self : str):
'''simple docstring'''
lowerCAmelCase__ = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(__snake_case , __snake_case)
def __snake_case ( self : List[Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(__snake_case) / 'preprocessor_config.json'
lowerCAmelCase__ = Path(__snake_case) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__snake_case , 'w') , )
json.dump({'model_type': 'clip'} , open(__snake_case , 'w'))
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(__snake_case)
self.assertIsInstance(__snake_case , __snake_case)
def __snake_case ( self : List[str]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(__snake_case) / 'preprocessor_config.json'
lowerCAmelCase__ = Path(__snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(__snake_case , 'w') , )
json.dump({'model_type': 'clip'} , open(__snake_case , 'w'))
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(__snake_case)
self.assertIsInstance(__snake_case , __snake_case)
def __snake_case ( self : Tuple):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
# Create a dummy config file with image_processor_type
lowerCAmelCase__ = Path(__snake_case) / 'preprocessor_config.json'
lowerCAmelCase__ = Path(__snake_case) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__snake_case , 'w') , )
json.dump({'model_type': 'clip'} , open(__snake_case , 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(__snake_case).to_dict()
config_dict.pop('image_processor_type')
lowerCAmelCase__ = CLIPImageProcessor(**__snake_case)
# save in new folder
model_config.save_pretrained(__snake_case)
config.save_pretrained(__snake_case)
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(__snake_case)
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(__snake_case , __snake_case)
def __snake_case ( self : List[Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(__snake_case) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__snake_case , 'w') , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(__snake_case)
self.assertIsInstance(__snake_case , __snake_case)
def __snake_case ( self : int):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , 'clip-base is not a local folder and is not a valid model identifier'):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained('clip-base')
def __snake_case ( self : Tuple):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(__snake_case , revision='aaaaaa')
def __snake_case ( self : str):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def __snake_case ( self : List[str]):
'''simple docstring'''
with self.assertRaises(__snake_case):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__snake_case)
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__snake_case)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case)
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case)
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor')
def __snake_case ( self : List[str]):
'''simple docstring'''
try:
AutoConfig.register('custom' , __snake_case)
AutoImageProcessor.register(__snake_case , __snake_case)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case):
AutoImageProcessor.register(__snake_case , __snake_case)
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(__snake_case) / 'preprocessor_config.json'
lowerCAmelCase__ = Path(__snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(__snake_case , 'w') , )
json.dump({'model_type': 'clip'} , open(__snake_case , 'w'))
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(__snake_case)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case)
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(__snake_case)
self.assertIsInstance(__snake_case , __snake_case)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self : Dict):
'''simple docstring'''
class a_ ( A__ ):
'''simple docstring'''
UpperCAmelCase_ = True
try:
AutoConfig.register('custom' , __snake_case)
AutoImageProcessor.register(__snake_case , __snake_case)
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__snake_case)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__snake_case)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(not hasattr(__snake_case , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
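# A minimal sketch of the registration pattern the tests above exercise
# (class and model-type names here are illustrative, not from the test suite):
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class DemoConfig(PretrainedConfig):
    model_type = "demo-model"

class DemoImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("demo-model", DemoConfig)
AutoImageProcessor.register(DemoConfig, DemoImageProcessor)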
| 353 |
def __lowerCamelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = len(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ ):
for j in range(i + 1 , lowerCAmelCase__ ):
if numbers[j] < numbers[i]:
lowerCAmelCase__ , lowerCAmelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCAmelCase__ = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 119 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__snake_case :Tuple = logging.get_logger(__name__)
__snake_case :Tuple = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__snake_case :Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def __snake_case ( _UpperCAmelCase ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__a = model_type_to_module_name(_UpperCAmelCase )
__a = importlib.import_module(f'.{module_name}' , '''transformers.models''' )
try:
return getattr(_UpperCAmelCase , _UpperCAmelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_UpperCAmelCase , '''__name__''' , _UpperCAmelCase ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__a = importlib.import_module('''transformers''' )
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
return getattr(_UpperCAmelCase , _UpperCAmelCase )
return None
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , **_UpperCAmelCase , ):
__a = get_file_from_repo(
_UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_UpperCAmelCase , encoding='''utf-8''' ) as reader:
return json.load(_UpperCAmelCase )
class _A :
def __init__( self : int):
'''simple docstring'''
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''')
@classmethod
@replace_list_option_in_docstrings(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE)
__a = kwargs.pop('''trust_remote_code''' , __SCREAMING_SNAKE_CASE)
__a = True
__a , __a = FeatureExtractionMixin.get_feature_extractor_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = config_dict.get('''feature_extractor_type''' , __SCREAMING_SNAKE_CASE)
__a = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {}):
__a = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
# It could be in `config.feature_extractor_type``
__a = getattr(__SCREAMING_SNAKE_CASE , '''feature_extractor_type''' , __SCREAMING_SNAKE_CASE)
if hasattr(__SCREAMING_SNAKE_CASE , '''auto_map''') and "AutoFeatureExtractor" in config.auto_map:
__a = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
__a = feature_extractor_class_from_name(__SCREAMING_SNAKE_CASE)
__a = feature_extractor_auto_map is not None
__a = feature_extractor_class is not None or type(__SCREAMING_SNAKE_CASE) in FEATURE_EXTRACTOR_MAPPING
__a = resolve_trust_remote_code(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if has_remote_code and trust_remote_code:
__a = get_class_from_dynamic_module(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = kwargs.pop('''code_revision''' , __SCREAMING_SNAKE_CASE)
if os.path.isdir(__SCREAMING_SNAKE_CASE):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__SCREAMING_SNAKE_CASE) in FEATURE_EXTRACTOR_MAPPING:
__a = FEATURE_EXTRACTOR_MAPPING[type(__SCREAMING_SNAKE_CASE)]
return feature_extractor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
raise ValueError(
F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}')
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
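# Typical use of the Auto class defined above (checkpoint choice illustrative):
from transformers import AutoFeatureExtractor

_fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(_fe).__name__)  # Wav2Vec2FeatureExtractor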
| 49 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] = logging.get_logger(__name__)
__snake_case :Any = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''switch_transformers'''
UpperCamelCase__ : Optional[Any] = ['''past_key_values''']
UpperCamelCase__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=32_128 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict="float32" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : List[str]=0.0_01 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
__a = vocab_size
__a = d_model
__a = d_kv
__a = d_ff
__a = num_sparse_encoder_layers
__a = num_layers
__a = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a = num_sparse_decoder_layers
# This tells us after how many encoder layers a sparse layer is inserted.
if self.num_sparse_encoder_layers > 0:
__a = self.num_layers // self.num_sparse_encoder_layers
else:
__a = self.num_layers # HACK: this will create 0 sparse layers
# This tells us after how many decoder layers a sparse layer is inserted.
if self.num_sparse_decoder_layers > 0:
__a = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__a = self.num_decoder_layers # HACK: this will create 0 sparse layers
__a = num_heads
__a = num_experts
__a = expert_capacity
__a = router_bias
__a = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
__a = router_dtype
__a = router_ignore_padding_tokens
__a = relative_attention_num_buckets
__a = relative_attention_max_distance
__a = dropout_rate
__a = layer_norm_epsilon
__a = initializer_factor
__a = feed_forward_proj
__a = use_cache
__a = add_router_probs
__a = router_z_loss_coef
__a = router_aux_loss_coef
__a = self.feed_forward_proj.split('''-''')
__a = act_info[-1]
__a = act_info[0] == '''gated'''
if len(__SCREAMING_SNAKE_CASE) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''')
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__a = '''gelu_new'''
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
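# A minimal sketch of the sparse-step bookkeeping implemented above: with the
# default 12 layers and 3 sparse encoder layers, every 4th encoder layer is a
# sparse (mixture-of-experts) layer. Attribute name follows the released
# transformers config.
from transformers import SwitchTransformersConfig

_cfg = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
print(_cfg.encoder_sparse_step)  # 4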
| 49 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
a_ = TypeVar("""T""")
a_ = TypeVar("""U""")
class __snake_case ( Generic[T, U] ):
"""simple docstring"""
def __init__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = key
__A : Tuple = val
__A : DoubleLinkedListNode[T, U] | None = None
__A : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
'''simple docstring'''
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class __snake_case ( Generic[T, U] ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
__A : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCamelCase , __lowerCamelCase )
__A : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCamelCase , __lowerCamelCase )
__A , __A : int = self.rear, self.head
def __repr__( self ):
'''simple docstring'''
__A : Dict = ['''DoubleLinkedList''']
__A : str = self.head
while node.next is not None:
rep.append(str(__lowerCamelCase ) )
__A : List[str] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__A : Union[str, Any] = node
__A : int = previous
__A : int = node
__A : Tuple = self.rear
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__A : Dict = node.next
__A : str = node.prev
__A : List[str] = None
__A : List[str] = None
return node
class __snake_case ( Generic[T, U] ):
"""simple docstring"""
_lowerCamelCase = {}
def __init__( self , __lowerCamelCase ):
'''simple docstring'''
__A : DoubleLinkedList[T, U] = DoubleLinkedList()
__A : List[Any] = capacity
__A : Optional[Any] = 0
__A : Any = 0
__A : str = 0
__A : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
'''simple docstring'''
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , __lowerCamelCase ):
'''simple docstring'''
return key in self.cache
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
__A : DoubleLinkedListNode[T, U] = self.cache[key]
__A : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__lowerCamelCase )
return node.val
self.miss += 1
return None
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__A : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__lowerCamelCase ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__A : Any = DoubleLinkedListNode(__lowerCamelCase , __lowerCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__A : Tuple = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__A : Optional[Any] = value
self.list.add(__lowerCamelCase )
@classmethod
def UpperCamelCase__( cls , __lowerCamelCase = 128 ):
'''simple docstring'''
def cache_decorator_inner(__lowerCamelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*__lowerCamelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
__A : List[str] = LRUCache(__lowerCamelCase )
__A : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__A : List[Any] = func(*__lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , __lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__lowerCamelCase , '''cache_info''' , __lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
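# The class-level decorator above mirrors functools.lru_cache; a self-contained
# sketch of the same technique using only the standard library:
from functools import lru_cache

@lru_cache(maxsize=128)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040
print(fib.cache_info())  # hit/miss bookkeeping, like the CacheInfo repr above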
| 291 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , **__lowerCamelCase ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
return super().__call__(__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase__( self , **__lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = {}
if "candidate_labels" in kwargs:
__A : Tuple = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__A : List[str] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase="This is a photo of {}." ):
'''simple docstring'''
__A : Optional[int] = load_image(__lowerCamelCase )
__A : Optional[int] = self.image_processor(images=[image] , return_tensors=self.framework )
__A : int = candidate_labels
__A : int = [hypothesis_template.format(__lowerCamelCase ) for x in candidate_labels]
__A : Dict = self.tokenizer(__lowerCamelCase , return_tensors=self.framework , padding=__lowerCamelCase )
__A : int = [text_inputs]
return inputs
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = model_inputs.pop('''candidate_labels''' )
__A : str = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __lowerCamelCase ):
__A : Union[str, Any] = text_inputs[0]
else:
# Batching case.
__A : str = text_inputs[0][0]
__A : List[str] = self.model(**__lowerCamelCase , **__lowerCamelCase )
__A : Dict = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = model_outputs.pop('''candidate_labels''' )
__A : int = model_outputs['''logits'''][0]
if self.framework == "pt":
__A : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 )
__A : Dict = probs.tolist()
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__A : List[Any] = [scores]
elif self.framework == "tf":
__A : List[Any] = stable_softmax(__lowerCamelCase , axis=-1 )
__A : str = probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
__A : str = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__lowerCamelCase , __lowerCamelCase ) , key=lambda x : -x[0] )
]
return result
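# Typical use of the pipeline implemented above (the image path is illustrative):
from transformers import pipeline

_classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
_preds = _classifier(
    "cat.png",
    candidate_labels=["cat", "dog"],
    hypothesis_template="This is a photo of {}.",
)
print(_preds)  # e.g. [{'score': 0.99, 'label': 'cat'}, {'score': 0.01, 'label': 'dog'}]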
| 291 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}
    )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument.")
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome.")
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from"
            " another script, save it, and load it from here, using --tokenizer_name"
        )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )
    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
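

# Example invocation (a sketch; the model name and file paths below are placeholders):
#
#   python run_language_modeling.py \
#     --model_name_or_path gpt2 \
#     --train_data_file ./train.txt \
#     --do_train \
#     --output_dir ./lm-output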
if __name__ == "__main__":
    main()

| 76 |
"""simple docstring"""
def find_minimum_change(denominations, value):
    total_value = int(value)

    # Initialize result
    answer = []

    # Traverse all denominations, largest first (assumes ascending input order)
    for denomination in reversed(denominations):
        # Take as many coins/notes of this denomination as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
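

# Sanity check (a sketch): greedy change-making is optimal for canonical coin
# systems such as the INR denominations above, but not for arbitrary ones.
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]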
| 173 | 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
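

# Sanity check (a sketch): the primes below 10 are 2, 3, 5 and 7, so
#   solution(10) == 17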
if __name__ == "__main__":
print(f'''{solution() = }''')
| 141 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code, as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
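

# Usage sketch (assumption: this command is registered under the `transformers-cli`
# entry point, as the other CLI subcommands are):
#
#   transformers-cli download --cache-dir ./models bert-base-uncased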
| 141 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
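

# With this lazy-module pattern, `from transformers.models.squeezebert import SqueezeBertModel`
# only triggers the heavy torch-dependent import on first attribute access.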
| 129 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF-Slim variable names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to the input of a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
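

# Worked example of the "SAME" rule above (illustrative): for in_height = 7,
# stride_height = 2 and kernel_height = 3, pad_along_height = max(3 - (7 % 2), 0) = 2,
# split as pad_top = 1 and pad_bottom = 1 -- matching TensorFlow's SAME padding.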
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: bool = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # Pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
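

# Usage sketch ("cat.png" is a placeholder path; the checkpoint is the one named in the
# docstring constants above):
#
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#   from PIL import Image
#
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])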
| 129 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that each `complete_*` script contains all of the
    information found in the `by_feature` example scripts, section by section.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None  # show full diffs on failure
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 355 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # NOTE: scraping by CSS class is fragile -- Yahoo can change this class name at any time.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 55 | 0 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Returns the list of all rotations of the given string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Returns the Burrows-Wheeler transform of s and the index of the original rotation."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverses the Burrows-Wheeler transform, recovering the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string must be int or castable to int.")

    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    # Rebuild the sorted rotation table one column at a time
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
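

# Round-trip property (a sketch): reverse_bwt inverts bwt_transform.
#   r = bwt_transform("banana")
#   assert reverse_bwt(r["bwt_string"], r["idx_original_string"]) == "banana"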
| 127 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 127 | 1 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
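

# Example (illustrative): a resolved cache path such as
#   ".../models--some--repo/snapshots/abc123.../unet/config.json"
# yields "abc123...", provided it matches REGEX_COMMIT_HASH.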
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
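

# Example (deterministic from the function above):
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"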
def _get_model_file(
    pretrained_model_name_or_path , * ,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                F'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('0.20.0' )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    F'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    F'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.' , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                F'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
                'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
                'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
                'login`.' )
        except RevisionNotFoundError:
            raise EnvironmentError(
                F'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
                'this model name. Check the model page at '
                F'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
        except EntryNotFoundError:
            raise EnvironmentError(
                F'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
        except HTTPError as err:
            raise EnvironmentError(
                F'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
        except ValueError:
            raise EnvironmentError(
                F'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
                F' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
                F' directory containing a file named {weights_name} or'
                ' \nCheckout your internet connection or see how to run the library in'
                ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
        except EnvironmentError:
            raise EnvironmentError(
                F'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
                '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
                F'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
                F'containing a file named {weights_name}' )
| 117 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "The column name of the images in the files."} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "A folder containing the training data."} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "A folder containing the validation data."} )
UpperCAmelCase__ : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _a ( self ) -> Optional[int]:
__UpperCamelCase ={}
if self.train_dir is not None:
__UpperCamelCase =self.train_dir
if self.validation_dir is not None:
__UpperCamelCase =self.validation_dir
__UpperCamelCase =data_files if data_files else None
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : str = field(
default=A_ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
UpperCAmelCase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
UpperCAmelCase__ : str = field(default=A_ , metadata={"help": "Name or path of preprocessor config."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
UpperCAmelCase__ : float = field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : float = field(
default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def collate_fn(examples ):
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCamelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCamelCase =training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__UpperCamelCase =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCamelCase =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__UpperCamelCase =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__UpperCamelCase =None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE__ ) and data_args.train_val_split > 0.0:
__UpperCamelCase =ds['train'].train_test_split(data_args.train_val_split )
__UpperCamelCase =split['train']
__UpperCamelCase =split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase ={
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
__UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
__UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__UpperCamelCase =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__UpperCamelCase =ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
if training_args.do_train:
__UpperCamelCase =ds['train'].column_names
else:
__UpperCamelCase =ds['validation'].column_names
if data_args.image_column_name is not None:
__UpperCamelCase =data_args.image_column_name
elif "image" in column_names:
__UpperCamelCase ='image'
elif "img" in column_names:
__UpperCamelCase ='img'
else:
__UpperCamelCase =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__UpperCamelCase =image_processor.size['shortest_edge']
else:
__UpperCamelCase =(image_processor.size['height'], image_processor.size['width'])
__UpperCamelCase =Compose(
[
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(SCREAMING_SNAKE_CASE__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples['pixel_values'] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__UpperCamelCase =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__UpperCamelCase =(
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ )
# Compute absolute learning rate
__UpperCamelCase =(
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__UpperCamelCase =training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
__UpperCamelCase =Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
__UpperCamelCase =None
if training_args.resume_from_checkpoint is not None:
__UpperCamelCase =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCamelCase =last_checkpoint
__UpperCamelCase =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__UpperCamelCase =trainer.evaluate()
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE__ )
# Write model card and (optionally) push to hub
__UpperCamelCase ={
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 117 | 1 |
"""simple docstring"""
import requests
_SCREAMING_SNAKE_CASE : Tuple = '' # <-- Put your OpenWeatherMap appid here!
_SCREAMING_SNAKE_CASE : Tuple = 'https://api.openweathermap.org/data/2.5/'
def lowerCamelCase__ ( _lowerCamelCase : str = "Chicago" , _lowerCamelCase : str = APPID ) -> dict:
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def lowerCamelCase__ ( _lowerCamelCase : str = "Kolkata, India" , _lowerCamelCase : str = APPID ) -> dict:
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def lowerCamelCase__ ( _lowerCamelCase : float = 55.68 , _lowerCamelCase : float = 12.57 , _lowerCamelCase : str = APPID ) -> dict:
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
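# Hypothetical usage sketch (requires a valid APPID to be set above):
# current_weather("Berlin")  -> JSON dict with keys such as "main" and "weather"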
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 183 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args , **kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests( unittest.TestCase ):
    """simple docstring"""
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__UpperCAmelCase : Tuple = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
    def run_pipeline_test( self , object_detector , examples ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = object_detector(examples[0] , threshold=0.0 )
__UpperCAmelCase : Tuple = len(UpperCamelCase )
self.assertGreater(UpperCamelCase , 0 )
self.assertEqual(
UpperCamelCase , [
{
"""score""": ANY(UpperCamelCase ),
"""label""": ANY(UpperCamelCase ),
"""box""": {"""xmin""": ANY(UpperCamelCase ), """ymin""": ANY(UpperCamelCase ), """xmax""": ANY(UpperCamelCase ), """ymax""": ANY(UpperCamelCase )},
}
for i in range(UpperCamelCase )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_small_model_tf( self ):
'''simple docstring'''
pass
@require_torch
    def test_small_model_pt( self ):
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__UpperCAmelCase : Tuple = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
__UpperCAmelCase : Any = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt( self ):
'''simple docstring'''
__UpperCAmelCase : Any = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : int = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
__UpperCAmelCase : List[str] = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf( self ):
'''simple docstring'''
pass
@require_torch
@slow
    def test_threshold( self ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 0.2
__UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : Any = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=UpperCamelCase , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
    def test_top_k( self ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = 2
__UpperCAmelCase : Union[str, Any] = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : List[str] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=UpperCamelCase , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 115 | 0 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            'num_train_timesteps': 201,
            'sigma_min': 0.002,
            'sigma_max': 80.0,
        }
        config.update(**kwargs )
        return config
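    # Sketch of an overridden config (the override value is illustrative):
    # self.get_scheduler_config(sigma_max=40.0)
    # -> {'num_train_timesteps': 201, 'sigma_min': 0.002, 'sigma_max': 40.0}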
    def test_step_shape( self ):
"""simple docstring"""
snake_case : Dict = 10
snake_case : Any = self.get_scheduler_config()
snake_case : Dict = self.scheduler_classes[0](**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
snake_case : Union[str, Any] = scheduler.timesteps[0]
snake_case : Optional[Any] = scheduler.timesteps[1]
snake_case : int = self.dummy_sample
snake_case : Optional[Any] = 0.1 * sample
snake_case : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
snake_case : Optional[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
    def test_timesteps( self ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
    def test_clip_denoised( self ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCAmelCase__ )
    def test_full_loop_no_noise_onestep( self ):
"""simple docstring"""
snake_case : Any = self.scheduler_classes[0]
snake_case : Optional[int] = self.get_scheduler_config()
snake_case : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
snake_case : List[Any] = 1
scheduler.set_timesteps(UpperCAmelCase__ )
snake_case : Tuple = scheduler.timesteps
snake_case : Optional[int] = torch.manual_seed(0 )
snake_case : Union[str, Any] = self.dummy_model()
snake_case : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCAmelCase__ ):
# 1. scale model input
snake_case : Any = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict noise residual
snake_case : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. predict previous sample x_t-1
snake_case : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
snake_case : List[str] = pred_prev_sample
snake_case : List[Any] = torch.sum(torch.abs(UpperCAmelCase__ ) )
snake_case : int = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
    def test_full_loop_no_noise_multistep( self ):
"""simple docstring"""
snake_case : Dict = self.scheduler_classes[0]
snake_case : int = self.get_scheduler_config()
snake_case : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
snake_case : List[Any] = [106, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
snake_case : Any = scheduler.timesteps
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : Union[str, Any] = self.dummy_model()
snake_case : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case : str = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict noise residual
snake_case : Tuple = model(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. predict previous sample x_t-1
snake_case : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
snake_case : List[Any] = pred_prev_sample
snake_case : str = torch.sum(torch.abs(UpperCAmelCase__ ) )
snake_case : Optional[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
    def test_custom_timesteps_increasing_order( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.scheduler_classes[0]
snake_case : str = self.get_scheduler_config()
snake_case : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
snake_case : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCAmelCase__ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
"""simple docstring"""
snake_case : int = self.scheduler_classes[0]
snake_case : str = self.get_scheduler_config()
snake_case : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
snake_case : Any = [39, 30, 12, 1, 0]
snake_case : Union[str, Any] = len(UpperCAmelCase__ )
with self.assertRaises(UpperCAmelCase__ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__ )
    def test_custom_timesteps_too_large( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.scheduler_classes[0]
snake_case : int = self.get_scheduler_config()
snake_case : List[str] = scheduler_class(**UpperCAmelCase__ )
snake_case : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase__ , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
| 361 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""
    def __init__( self , degree: int , coefficients: MutableSequence[float] ) -> None:
        """simple docstring"""
        if len(coefficients ) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )

        self.coefficients: list[float] = list(coefficients )
        self.degree = degree

    def __add__( self , polynomial_2: Polynomial ) -> Polynomial:
        """simple docstring"""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1 ):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree , coefficients )

    def __sub__( self , polynomial_2: Polynomial ) -> Polynomial:
        """simple docstring"""
        return self + polynomial_2 * Polynomial(0 , [-1] )

    def __neg__( self ) -> Polynomial:
        """simple docstring"""
        return Polynomial(self.degree , [-c for c in self.coefficients] )

    def __mul__( self , polynomial_2: Polynomial ) -> Polynomial:
        """simple docstring"""
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_2.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree , coefficients )

    def evaluate( self , substitution: int | float ) -> int | float:
        """simple docstring"""
        result: int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__( self ) -> str:
        """simple docstring"""
        polynomial = ''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial

    def __repr__( self ) -> str:
        """simple docstring"""
        return self.__str__()

    def derivative( self ) -> Polynomial:
        """simple docstring"""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )

    def integral( self , constant: int | float = 0 ) -> Polynomial:
        """simple docstring"""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )

    def __eq__( self , polynomial_2: object ) -> bool:
        """simple docstring"""
        if not isinstance(polynomial_2 , Polynomial ):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__( self , polynomial_2: object ) -> bool:
        """simple docstring"""
        return not self.__eq__(polynomial_2 )
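# A minimal usage sketch (values are illustrative, not from the original file):
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
    q = Polynomial(1, [1, 2])  # 2x + 1
    print(p + q)  # 3x^2 + 4x + 2
    print(p.evaluate(2))  # 1 + 2*2 + 3*4 = 17
    print(p.derivative())  # 6x + 2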
| 83 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem( AbstractArchiveFileSystem ):
    '''simple docstring'''
    root_marker = ''
    protocol: str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None # compression type in fsspec. ex: "gzip"
    extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__(self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode='rb' , protocol=target_protocol , compression=self.compression , client_kwargs={
                'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True, # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('::' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('.' )]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip('/' )

    def _get_dirs(self ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}

    def cat(self , path: str ):
        return self.file.open().read()

    def _open(self , path: str , mode: str = 'rb' , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
        return self.file.open()
class Bz2FileSystem( BaseCompressedFileFileSystem ):
    '''simple docstring'''
    protocol = 'bz2'
    compression = 'bz2'
    extension = '.bz2'


class GzipFileSystem( BaseCompressedFileFileSystem ):
    '''simple docstring'''
    protocol = 'gzip'
    compression = 'gzip'
    extension = '.gz'


class Lz4FileSystem( BaseCompressedFileFileSystem ):
    '''simple docstring'''
    protocol = 'lz4'
    compression = 'lz4'
    extension = '.lz4'


class XzFileSystem( BaseCompressedFileFileSystem ):
    '''simple docstring'''
    protocol = 'xz'
    compression = 'xz'
    extension = '.xz'


class ZstdFileSystem( BaseCompressedFileFileSystem ):
    '''simple docstring'''
    protocol = 'zstd'
    compression = 'zstd'
    extension = '.zst'

    def __init__(self , fo: str , mode: str = 'rb' , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , block_size: int = DEFAULT_BLOCK_SIZE , **kwargs , ):
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            '''simple docstring'''
            def __init__(self , file_ ):
                self._file = file_

            def __enter__(self ):
                self._file.__enter__()
                return self

            def __exit__(self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )

            def __iter__(self ):
                return iter(self._file )

            def __next__(self ):
                return next(self._file )

            def __getattr__(self , attr ):
                return getattr(self._file , attr )

        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )

        self.file.__enter__ = fixed_enter
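# Hypothetical usage sketch (the path is illustrative): reading a
# gzip-compressed file through the filesystem defined above.
# fs = GzipFileSystem(fo="path/to/train.jsonl.gz")
# raw_bytes = fs.cat("train.jsonl")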
| 258 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int , right: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int] , target: int ) -> int:
    """simple docstring"""
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int , right: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
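# Quick sanity check (hypothetical data, not part of the original file):
# both variants should locate 7 at index 3 of the sorted list.
assert ite_ternary_search([1, 3, 5, 7, 9], 7) == 3
assert rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 7) == 3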
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print('Not found')
| 258 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ):
    '''simple docstring'''
    if is_torch_version("<" , "2.0.0" ) or not hasattr(torch , "_dynamo" ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )


def extract_model_from_parallel(model , keep_fp32_wrapper = True ):
    '''simple docstring'''
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model , options ):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model , "forward" )
        original_forward = model.__dict__.pop("_original_forward" , None )
        if original_forward is not None:
            while hasattr(forward , "__wrapped__" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , "_converted_to_transformer_engine" , False ):
        convert_model(model , to_transformer_engine=False )

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
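# Hypothetical usage sketch (the DDP wrapper below is illustrative):
# ddp_model = torch.nn.parallel.DistributedDataParallel(my_model)
# inner = extract_model_from_parallel(ddp_model)  # returns my_model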
def wait_for_everyone():
    '''simple docstring'''
    PartialState().wait_for_everyone()


def save(obj , f ):
    '''simple docstring'''
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
    '''simple docstring'''
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj ):
    '''simple docstring'''
    if not hasattr(obj , "__qualname__" ) and not hasattr(obj , "__name__" ):
        obj = getattr(obj , "__class__" , obj )
    if hasattr(obj , "__qualname__" ):
        return obj.__qualname__
    if hasattr(obj , "__name__" ):
        return obj.__name__
    return str(obj )


def merge_dicts(source , destination ):
    '''simple docstring'''
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value

    return destination


def is_port_in_use(port = None ):
    '''simple docstring'''
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("localhost", port) ) == 0
| 259 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10 ) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes )
        n += 2
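# Quick sanity check (hypothetical limit, not part of the original file):
# sampling primes at odd indices gives p_1=2, p_3=5, p_5=11, and
# 2 * 11 * 5 = 110 is the first product above 30, so solution(30) == 5.
assert solution(30) == 5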
if __name__ == "__main__":
print(solution())
| 259 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCamelCase : Tuple =logging.getLogger(__name__)
class NERTransformer( BaseTransformer ):
    mode = 'token-classification'
    def __init__( self , hparams ):
        '''simple docstring'''
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module("tasks" )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                F'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
                F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self , **inputs ):
        '''simple docstring'''
        return self.model(**inputs )
    def training_step( self , batch , batch_num ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ : str = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCamelCase__ : Dict = self(**__lowercase )
UpperCamelCase__ : Dict = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
    def prepare_data( self ):
        '''simple docstring'''
        args = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase__ : Union[str, Any] = self._feature_file(__lowercase )
if os.path.exists(__lowercase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , __lowercase )
UpperCamelCase__ : Optional[int] = torch.load(__lowercase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
UpperCamelCase__ : List[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __lowercase )
UpperCamelCase__ : List[str] = self.token_classification_task.convert_examples_to_features(
__lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , __lowercase )
torch.save(__lowercase , __lowercase )
    def get_dataloader( self , mode: int , batch_size: int , shuffle: bool = False ):
'''simple docstring'''
UpperCamelCase__ : int = self._feature_file(__lowercase )
logger.info("Loading features from cached file %s" , __lowercase )
UpperCamelCase__ : Union[str, Any] = torch.load(__lowercase )
UpperCamelCase__ : str = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase__ : Union[str, Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase__ : Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase__ : Any = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCamelCase__ : List[Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__lowercase , __lowercase , __lowercase , __lowercase ) , batch_size=__lowercase )
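    # Hypothetical call sketch (the batch size is illustrative):
    # loader = self.get_dataloader("dev", batch_size=32)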
    def validation_step( self , batch , batch_nb ):
        '''Compute validation'''
UpperCamelCase__ : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ : Dict = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCamelCase__ : Dict = self(**__lowercase )
UpperCamelCase__ , UpperCamelCase__ : str = outputs[:2]
UpperCamelCase__ : Dict = logits.detach().cpu().numpy()
UpperCamelCase__ : Any = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
'''simple docstring'''
UpperCamelCase__ : Dict = torch.stack([x["val_loss"] for x in outputs] ).mean()
UpperCamelCase__ : Dict = np.concatenate([x["pred"] for x in outputs] , axis=0 )
UpperCamelCase__ : List[Any] = np.argmax(__lowercase , axis=2 )
UpperCamelCase__ : Any = np.concatenate([x["target"] for x in outputs] , axis=0 )
UpperCamelCase__ : int = dict(enumerate(self.labels ) )
UpperCamelCase__ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ : int = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase__ : Tuple = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(__lowercase , __lowercase ),
"precision": precision_score(__lowercase , __lowercase ),
"recall": recall_score(__lowercase , __lowercase ),
"f1": fa_score(__lowercase , __lowercase ),
}
UpperCamelCase__ : Union[str, Any] = dict(results.items() )
UpperCamelCase__ : Union[str, Any] = results
return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ):
        '''simple docstring'''
        ret , preds , targets = self._eval_end(outputs )
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        '''simple docstring'''
        ret , predictions , targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir ):
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(parser , root_dir )
parser.add_argument(
"--task_type" , default="NER" , type=__lowercase , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=__lowercase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=__lowercase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=__lowercase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
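# Usage note added for context: the CLI flags registered above extend the
# generic arguments from `add_generic_args`/`BaseTransformer`, which are
# defined outside this excerpt, so a typical invocation of the entry point
# below would look like the following (script name and flag values are
# illustrative assumptions, not taken from the original file):
#   python run_ner.py --data_dir ./data --model_name_or_path bert-base-cased \
#       --output_dir ./out --do_train --do_predict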
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model) | 189 |
import functools
def mincost_tickets(days, costs):
    """Minimum cost to travel on every day in `days`, given the prices of
    1-day, 7-day and 30-day passes in `costs`."""
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
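# Worked example added for illustration (values from the classic statement of
# this problem, not from the original file): with 1-, 7- and 30-day passes
# costing 2, 7 and 15, travelling on days 1, 4, 6, 7, 8 and 20 costs 11 at
# best (a day pass, a week pass covering days 4-8, and another day pass).
# >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
# 11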
| 187 | 0 |
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return `min_val` or `max_val` depending on `option`, validating the bounds."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be <= max_val)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find `to_guess` between `lower` and `higher` by repeatedly halving the interval."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument values must satisfy lower < higher")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
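# Non-interactive usage sketch (added for illustration): searching for 42
# between 0 and 100 halves the interval each round, so the expected trace is:
# >>> guess_the_number(0, 100, 42)
# started...
# guess the number : 42
# details : [50, 25, 37, 43, 40, 41, 42]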
| 333 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
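# Usage sketch (illustrative): aligning the template against a concrete schema
# copies the dataset's own ClassLabel into the template's label schema.
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = TextClassification(text_column="text", label_column="labels")
# task = task.align_with_features(features)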
| 333 | 1 |
import math
def perfect_square(num: int) -> bool:
    """Check squareness via floating-point sqrt (fast, but inexact for very large inputs)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Exact check: binary-search the integer square root."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
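# Quick sanity checks (added for illustration): both strategies agree on small
# inputs; only the binary-search variant stays exact for very large integers.
# >>> perfect_square(9), perfect_square_binary_search(9)
# (True, True)
# >>> perfect_square_binary_search(10)
# False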
| 65 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
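# Note added for context: these tests follow the standard transformers test
# layout, so in a development checkout they would typically run via
#   python -m pytest tests/models/lilt/test_modeling_lilt.py
# (the exact path is an assumption based on the repository's conventions).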
| 52 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
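# Usage sketch (illustrative): the defaults above describe the PoolFormer-S12
# layout, so a bare instantiation already exposes the stage widths.
# config = PoolFormerConfig()
# print(config.hidden_sizes)  # [64, 128, 320, 512]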
| 355 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
        tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
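# Invocation sketch (flags mirror the argparse definitions above; the script
# name and the file paths are illustrative placeholders, not from the source):
#   python convert_mluke_checkpoint.py --checkpoint_path ./mluke.bin \
#       --metadata_path ./metadata.json --entity_vocab_path ./entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted --model_size base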
| 162 | 0 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift left by appending zeros; returns a '0b'-prefixed binary string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = bin(number)
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Drop the lowest `shift_amount` bits; returns a '0b'-prefixed string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = bin(number)[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Sign-extending right shift on a two's-complement binary string."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
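# Worked examples (added for illustration, matching the functions above):
# >>> logical_left_shift(1, 2)
# '0b100'
# >>> logical_right_shift(8, 2)
# '0b10'
# >>> arithmetic_right_shift(-8, 2)   # -8 is 11000 in 5-bit two's complement
# '0b11110'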
| 107 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=self.is_encoder_decoder, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a = TFOPTModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__lowerCamelCase : Tuple , __lowerCamelCase : int ):
if hasattr(__lowerCamelCase , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__lowerCamelCase , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
a = model_class(config=__lowerCamelCase )
a = _get_word_embedding_weight(__lowerCamelCase , model.get_input_embeddings() )
a = _get_word_embedding_weight(__lowerCamelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__lowerCamelCase )
a = _get_word_embedding_weight(__lowerCamelCase , model.get_input_embeddings() )
a = _get_word_embedding_weight(__lowerCamelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
a = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __lowerCamelCase )
# check that weights remain the same after resizing
a = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
a = False
self.assertTrue(__lowerCamelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __lowerCamelCase )
a = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
a = False
self.assertTrue(__lowerCamelCase )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 99
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = tf.ones((4, 1) , dtype=tf.intaa ) * 2
a = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
a = input_ids.shape[0]
a = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = TFOPTModel.from_pretrained("facebook/opt-350m" )
a = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
a = tf.not_equal(__lowerCamelCase , model.config.pad_token_id )
with tf.GradientTape():
a = model(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase ).last_hidden_state
a = (1, 11, 5_12)
self.assertEqual(output.shape , __lowerCamelCase )
a = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __lowerCamelCase , atol=4e-3 ) )
a = tf.function(__lowerCamelCase , jit_compile=__lowerCamelCase )
a = xla_generate(__lowerCamelCase , __lowerCamelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __lowerCamelCase , atol=4e-2 ) )
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = TFOPTForCausalLM.from_pretrained(self.path_model )
a = GPTaTokenizer.from_pretrained(self.path_model )
a = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
a = tokenizer(__lowerCamelCase , return_tensors="tf" , padding=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
a = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) )
a = tf.function(__lowerCamelCase , jit_compile=__lowerCamelCase )
a = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) )
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
a = "facebook/opt-125m"
a = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
a = []
a = GPTaTokenizer.from_pretrained(__lowerCamelCase )
a = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
for prompt in self.prompts:
a = tokenizer(__lowerCamelCase , return_tensors="tf" ).input_ids
a = model.generate(__lowerCamelCase , max_length=10 )
a = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> Dict:
a = "facebook/opt-350m"
a = GPTaTokenizer.from_pretrained(__lowerCamelCase )
a = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
a = "left"
# use different length sentences to test batching
a = [
"Hello, my dog is a little",
"Today, I",
]
a = tokenizer(__lowerCamelCase , return_tensors="tf" , padding=__lowerCamelCase )
a = inputs["input_ids"]
a = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs["attention_mask"] )
a = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
a = model.generate(input_ids=__lowerCamelCase )
a = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
a = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
a = model.generate(input_ids=__lowerCamelCase , max_length=model.config.max_length - num_paddings )
a = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
a = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
a = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
a = "facebook/opt-350m"
a = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
a = []
a = GPTaTokenizer.from_pretrained(__lowerCamelCase )
a = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
for prompt in self.prompts:
a = tokenizer(__lowerCamelCase , return_tensors="tf" ).input_ids
a = model.generate(__lowerCamelCase , max_length=10 )
a = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
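# Note added for context: as with other transformers test modules, this file
# would typically be run via (the path is assumed from repository conventions)
#   python -m pytest tests/models/opt/test_modeling_tf_opt.py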
| 107 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
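# Minimal usage sketch (illustrative; the dataset name is an example only):
# from datasets import load_dataset
# ds = load_dataset("imdb", split="train")
# print(ds[0]["text"][:80])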
| 19 |
import math
def check_partition_perfect(positive_integer):
    """A partition is perfect when the derived exponent is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the partition number at which the proportion of perfect
    partitions first drops below `max_proportion`."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
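# Why the log2 test works (explanatory note added here): a partition of the
# form k = 4**t - 2**t exists exactly when sqrt(4*k + 1)/2 + 1/2 equals a
# power of two 2**t, so the partition is "perfect" precisely when the log2 of
# that quantity is an integer, which is what check_partition_perfect verifies.
# Example: k = 12 gives sqrt(49)/2 + 1/2 = 4 = 2**2, and indeed 4**2 - 2**2 = 12.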
| 19 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
"""simple docstring"""
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(lowerCamelCase_ )
class a_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = ['input_ids', 'attention_mask']
__UpperCAmelCase = DPRReaderTokenizer
| 334 |
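The span-selection helper that closes the record above is hard to follow through the masked identifiers, so here is a minimal de-obfuscated sketch of the same greedy search: score every candidate span by start-logit plus end-logit, then keep the highest-scoring non-overlapping spans. All names are hypothetical reconstructions, not the library's API.

```python
def get_best_spans(start_logits, end_logits, max_answer_length, top_spans):
    # score every candidate span of length <= max_answer_length
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(
            end_logits[start_index : start_index + max_answer_length]
        ):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores.sort(key=lambda pair: pair[1], reverse=True)
    chosen = []
    for (start_index, end_index), _ in scores:
        # skip any span that overlaps one already chosen
        if any(
            start_index <= prev_start <= prev_end <= end_index
            or prev_start <= start_index <= end_index <= prev_end
            for prev_start, prev_end in chosen
        ):
            continue
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen

print(get_best_spans([0.1, 0.9, 0.2], [0.3, 0.8, 0.4], max_answer_length=2, top_spans=2))
# [(1, 1), (0, 0)]
```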
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
_lowerCamelCase =2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase =50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =len([g for position, g in enumerate(lowerCAmelCase_ ) if g == main_target[position]] )
return (item, float(lowerCAmelCase_ ))
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =random.randint(0, len(lowerCAmelCase_ ) - 1 )
SCREAMING_SNAKE_CASE =parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =list(lowerCAmelCase_ )
if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE =random.choice(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =[]
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE =int(parent_a[1] * 100 ) + 1
SCREAMING_SNAKE_CASE =10 if child_n >= 10 else child_n
for _ in range(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE =population_score[random.randint(0, lowerCAmelCase_ )][0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =crossover(parent_a[0], lowerCAmelCase_ )
# Append new string to the population list.
pop.append(mutate(lowerCAmelCase_, lowerCAmelCase_ ) )
pop.append(mutate(lowerCAmelCase_, lowerCAmelCase_ ) )
return pop
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE =F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(lowerCAmelCase_ )
    # Verify that the target contains no genes besides the ones in the genes variable.
SCREAMING_SNAKE_CASE =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE =F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(lowerCAmelCase_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE =[]
for _ in range(lowerCAmelCase_ ):
population.append(''.join([random.choice(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCAmelCase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE =[evaluate(lowerCAmelCase_, lowerCAmelCase_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE =sorted(lowerCAmelCase_, key=lambda lowerCAmelCase_ : x[1], reverse=lowerCAmelCase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
            F'\nTotal Population: {total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of the evolution.
SCREAMING_SNAKE_CASE =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCAmelCase_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE =[
(item, score / len(lowerCAmelCase_ )) for item, score in population_score
]
        # This is the selection step.
for i in range(lowerCAmelCase_ ):
population.extend(select(population_score[int(lowerCAmelCase_ )], lowerCAmelCase_, lowerCAmelCase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(lowerCAmelCase_ ) > N_POPULATION:
break
if __name__ == "__main__":
_lowerCamelCase =(
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_lowerCamelCase =list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase =basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 334 | 1 |
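The masked names in the genetic-algorithm record above collapse the two parents of `crossover` (and the two children) into a single identifier; this is a minimal sketch of what those two steps compute, assuming two distinct parents and a module-level gene list (all names hypothetical).

```python
import random

GENES = list(" abcdefghijklmnopqrstuvwxyz")
MUTATION_PROBABILITY = 0.4

def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # swap the tails of the two parents at a random cut point
    cut = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:cut] + parent_2[cut:]
    child_2 = parent_2[:cut] + parent_1[cut:]
    return child_1, child_2

def mutate(child: str) -> str:
    # with some probability, replace one random gene
    genes = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        genes[random.randint(0, len(genes) - 1)] = random.choice(GENES)
    return "".join(genes)

child_1, child_2 = crossover("hello world", "howdy earth")
print(mutate(child_1), "|", mutate(child_2))
```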
def lowerCamelCase_ ( _a : str ):
'''simple docstring'''
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def lowerCamelCase_ ( _a : str ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = credit_card_number
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Tuple = len(lowerCAmelCase__ ) - 2
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
UpperCAmelCase_ : Dict = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCAmelCase_ : Optional[Any] = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def lowerCamelCase_ ( _a : str ):
'''simple docstring'''
UpperCAmelCase_ : int = F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(lowerCAmelCase__ ) <= 16:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
| 360 |
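As a quick cross-check of the Luhn loop above, here is a compact equivalent with a worked checksum for the standard test number 4111111111111111. Note that `digit -= 9` is the same as `digit %= 10; digit += 1` for any doubled digit between 10 and 18.

```python
def luhn_checksum(number: str) -> int:
    total = 0
    for offset, ch in enumerate(reversed(number)):
        digit = int(ch)
        if offset % 2 == 1:   # every second digit from the right
            digit *= 2
            if digit > 9:     # e.g. 6 * 2 = 12 -> 1 + 2 = 3, same as 12 - 9
                digit -= 9
        total += digit
    return total

# 4111111111111111: eight plain 1s (8) + seven doubled 1s (14) + doubled 4 (8) = 30
assert luhn_checksum("4111111111111111") % 10 == 0
print(luhn_checksum("4111111111111111"))  # 30
```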
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCamelCase_ = TypeVar('''T''')
UpperCamelCase_ = TypeVar('''U''')
class _snake_case ( Generic[T, U] ):
'''simple docstring'''
def __init__( self: List[str] ,lowerCamelCase_: T | None ,lowerCamelCase_: U | None ) -> Optional[int]:
UpperCAmelCase_ : Any = key
UpperCAmelCase_ : List[str] = val
UpperCAmelCase_ : DoubleLinkedListNode[T, U] | None = None
UpperCAmelCase_ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self: Optional[Any] ) -> str:
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class _snake_case ( Generic[T, U] ):
'''simple docstring'''
def __init__( self: Tuple ) -> None:
UpperCAmelCase_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.rear, self.head
def __repr__( self: Tuple ) -> str:
UpperCAmelCase_ : List[str] = ["""DoubleLinkedList"""]
UpperCAmelCase_ : Optional[Any] = self.head
while node.next is not None:
rep.append(str(lowerCamelCase_ ) )
UpperCAmelCase_ : str = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase_ )
def A__ ( self: Tuple ,lowerCamelCase_: DoubleLinkedListNode[T, U] ) -> None:
UpperCAmelCase_ : str = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Tuple = previous
UpperCAmelCase_ : List[Any] = node
UpperCAmelCase_ : List[str] = self.rear
def A__ ( self: str ,lowerCamelCase_: DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
UpperCAmelCase_ : str = node.next
UpperCAmelCase_ : Optional[int] = node.prev
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : int = None
return node
class _snake_case ( Generic[T, U] ):
'''simple docstring'''
A__ : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self: Any ,lowerCamelCase_: int ) -> Union[str, Any]:
UpperCAmelCase_ : DoubleLinkedList[T, U] = DoubleLinkedList()
UpperCAmelCase_ : Optional[Any] = capacity
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self: Any ) -> str:
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self: Union[str, Any] ,lowerCamelCase_: T ) -> bool:
return key in self.cache
def A__ ( self: Any ,lowerCamelCase_: T ) -> U | None:
        # Note: a Pythonic interface would raise KeyError rather than return None
if key in self.cache:
self.hits += 1
UpperCAmelCase_ : DoubleLinkedListNode[T, U] = self.cache[key]
UpperCAmelCase_ : Any = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase_ )
return node.val
self.miss += 1
return None
def A__ ( self: Optional[Any] ,lowerCamelCase_: T ,lowerCamelCase_: U ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
UpperCAmelCase_ : Tuple = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase_ ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
UpperCAmelCase_ : Tuple = DoubleLinkedListNode(lowerCamelCase_ ,lowerCamelCase_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
UpperCAmelCase_ : Tuple = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
UpperCAmelCase_ : int = value
self.list.add(lowerCamelCase_ )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(lowerCamelCase_: Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase_: T ) -> U:
if func not in cls.decorator_function_to_instance_map:
UpperCAmelCase_ : Union[str, Any] = LRUCache(lowerCamelCase_ )
UpperCAmelCase_ : Any = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
UpperCAmelCase_ : Optional[Any] = func(*lowerCamelCase_ )
cls.decorator_function_to_instance_map[func].put(args[0] ,lowerCamelCase_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase_ ,"""cache_info""" ,lowerCamelCase_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 | 0 |
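The class above reimplements the interface of `functools.lru_cache` — a capacity-bounded cache plus a decorator that exposes `cache_info`. For comparison, the standard-library equivalent of the decorator classmethod:

```python
from functools import lru_cache

@lru_cache(maxsize=128)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(24))           # 46368
print(fib.cache_info())  # CacheInfo(hits=22, misses=25, maxsize=128, currsize=25)
```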
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
| 18 | """simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
__A = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
__A = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class _snake_case ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = RealmTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]="[UNK]" , UpperCAmelCase : Tuple="[SEP]" , UpperCAmelCase : List[str]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : str=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
__lowerCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
__lowerCamelCase : str = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
__lowerCamelCase : Any = do_lower_case
__lowerCamelCase : List[Any] = strip_accents
__lowerCamelCase : Optional[Any] = tokenize_chinese_chars
__lowerCamelCase : int = normalizer_class(**UpperCAmelCase )
__lowerCamelCase : List[Any] = do_lower_case
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Dict , **UpperCAmelCase : int ):
__lowerCamelCase : Optional[int] = PaddingStrategy.MAX_LENGTH
__lowerCamelCase : List[Any] = text
__lowerCamelCase : Optional[int] = kwargs.pop("text_pair" , UpperCAmelCase )
__lowerCamelCase : List[Any] = kwargs.pop("return_tensors" , UpperCAmelCase )
__lowerCamelCase : Dict = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(UpperCAmelCase ):
if batch_text_pair is not None:
__lowerCamelCase : List[str] = batch_text_pair[idx]
else:
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : List[str] = super().__call__(UpperCAmelCase , UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = encoded_candidates.get("input_ids" )
__lowerCamelCase : Optional[int] = encoded_candidates.get("attention_mask" )
__lowerCamelCase : int = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCAmelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCAmelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = {key: item for key, item in output_data.items() if len(UpperCAmelCase ) != 0}
return BatchEncoding(UpperCAmelCase , tensor_type=UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None ):
__lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
__lowerCamelCase : Tuple = [self.sep_token_id]
__lowerCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
__lowerCamelCase : Any = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase ) | 135 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
UpperCamelCase : Tuple = test_metrics
@require_cpu
def snake_case_ ( self ) -> Dict:
debug_launcher(self.test_metrics.main, num_processes=1 )
@require_cpu
def snake_case_ ( self ) -> Optional[int]:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case_ ( self ) -> Union[str, Any]:
self.test_metrics.main()
@require_multi_gpu
def snake_case_ ( self ) -> Union[str, Any]:
print(F"""Found {torch.cuda.device_count()} devices.""" )
UpperCamelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_, env=os.environ.copy() )
| 103 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=4, ) -> Dict:
UpperCamelCase : Optional[int] = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : Optional[int] = seq_length
UpperCamelCase : Any = is_training
UpperCamelCase : Tuple = use_attention_mask
UpperCamelCase : Dict = use_token_type_ids
UpperCamelCase : Union[str, Any] = use_labels
UpperCamelCase : Any = vocab_size
UpperCamelCase : Any = hidden_size
UpperCamelCase : str = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : Any = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : int = type_vocab_size
UpperCamelCase : Optional[int] = type_sequence_label_size
UpperCamelCase : str = initializer_range
UpperCamelCase : Tuple = num_choices
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase : Dict = None
if self.use_attention_mask:
UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : int = None
if self.use_token_type_ids:
UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase : Optional[int] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = config_and_inputs
UpperCamelCase : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : int = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = config_and_inputs
UpperCamelCase : Dict = True
UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Optional[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : int = FlaxRobertaPreLayerNormModelTester(self )
@slow
def snake_case_ ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
UpperCamelCase : List[str] = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.intaa )
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase : List[Any] = [1, 11, 5_0265]
self.assertEqual(list(output.shape ), SCREAMING_SNAKE_CASE_ )
# compare the actual values for a slice.
UpperCamelCase : Optional[int] = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]], dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Optional[int] = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.intaa )
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
UpperCamelCase : Any = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]], dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
| 103 | 1 |
'''simple docstring'''
_lowercase : Tuple = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def snake_case_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def snake_case_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 |
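A standalone worked example of the two relations above — both are rearrangements of pV = nRT — written out separately because the masked function names in the record collide:

```python
R = 8.314462  # universal gas constant, J mol^-1 K^-1

def pressure(moles: float, kelvin: float, volume: float) -> float:
    return moles * kelvin * R / volume

def volume(moles: float, kelvin: float, p: float) -> float:
    return moles * kelvin * R / p

p = pressure(1.0, 300.0, 0.0224)  # ~111,354 Pa for 1 mol at 300 K in 22.4 L
print(p, volume(1.0, 300.0, p))   # the volume round-trips to 0.0224 m^3
```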
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[str] = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : str = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('''No input value was provided''' )
lowercase_ : Optional[int] = '''-''' if number.startswith('''-''' ) else ''''''
lowercase_ : Union[str, Any] = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return F'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 | 1 |
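The masked names above collapse the (quotient, remainder) pair returned by `divmod`, so here is the same recursion written out explicitly:

```python
def binary_recursive(decimal: int) -> str:
    if decimal in (0, 1):  # base cases of the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)

print(binary_recursive(40))  # 101000
```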
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=_lowerCamelCase , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=_lowerCamelCase , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=_lowerCamelCase , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=_lowerCamelCase , default=0 , help="cuda_id." , )
__UpperCamelCase : Optional[int] = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str) -> int:
'''simple docstring'''
if not len(_lowerCamelCase) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct.")
__UpperCamelCase : Optional[Any] = imgs[0].size
__UpperCamelCase : Union[str, Any] = Image.new("RGB" , size=(cols * w, rows * h))
__UpperCamelCase : Any = grid.size
for i, img in enumerate(_lowerCamelCase):
grid.paste(_lowerCamelCase , box=(i % cols * w, i // cols * h))
return grid
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : str="robotic cat with wings" , _lowerCamelCase : Union[str, Any]=7.5 , _lowerCamelCase : Any=50 , _lowerCamelCase : Tuple=1 , _lowerCamelCase : Optional[int]=42 , ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Tuple = torch.Generator(pipeline.device).manual_seed(_lowerCamelCase)
__UpperCamelCase : List[Any] = pipeline(
_lowerCamelCase , guidance_scale=_lowerCamelCase , num_inference_steps=_lowerCamelCase , generator=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , ).images
__UpperCamelCase : str = int(math.sqrt(_lowerCamelCase))
__UpperCamelCase : Optional[Any] = image_grid(_lowerCamelCase , rows=_rows , cols=num_images_per_prompt // _rows)
return grid, images
lowercase : int = parse_args()
# Load models and create wrapper for stable diffusion
lowercase : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase : Dict = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase : Union[str, Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase : int = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase : str = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowercase : List[Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowercase : List[str] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowercase : List[Any] = unet.to(torch.device('cuda', args.cuda_id))
lowercase : int = pipeline.to(unet.device)
lowercase : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase : Any = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1))) | 363 |
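A quick standalone check of the grid-pasting arithmetic used by `image_grid` above: tile i lands at column i % cols and row i // cols.

```python
from PIL import Image

tiles = [Image.new("RGB", (8, 8), color) for color in ("red", "green", "blue", "white")]
w, h = tiles[0].size
rows = cols = 2
grid = Image.new("RGB", (cols * w, rows * h))
for i, img in enumerate(tiles):
    grid.paste(img, box=(i % cols * w, i // cols * h))
print(grid.size)  # (16, 16)
```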
import random
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : bool = False) -> dict:
'''simple docstring'''
__UpperCamelCase : dict = {i: [] for i in range(_lowerCamelCase)}
    # if probability is greater than or equal to 1, then generate a complete graph
if probability >= 1:
return complete_graph(_lowerCamelCase)
    # if probability is less than or equal to 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than probability
for i in range(_lowerCamelCase):
for j in range(i + 1 , _lowerCamelCase):
if random.random() < probability:
graph[i].append(_lowerCamelCase)
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_lowerCamelCase)
return graph
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_lowerCamelCase) if i != j] for i in range(_lowerCamelCase)
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 151 | 0 |
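Sanity check for the generator above: with edge probability p, an undirected graph on n nodes has about p * n * (n - 1) / 2 edges on average. `random_graph` below is a hypothetical name restating the record's first function so the check is runnable:

```python
import random

def random_graph(n: int, p: float) -> dict[int, list[int]]:
    graph: dict[int, list[int]] = {i: [] for i in range(n)}
    for i in range(n):
        for j in range(i + 1, n):
            if random.random() < p:
                graph[i].append(j)
                graph[j].append(i)  # undirected: store the reverse edge too
    return graph

random.seed(0)
n, p = 200, 0.25
edges = sum(len(adj) for adj in random_graph(n, p).values()) // 2  # each edge stored twice
print(edges, "~", p * n * (n - 1) / 2)  # observed vs expected (= 4975.0)
```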
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : str ) -> bool:
'''simple docstring'''
UpperCAmelCase_ = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 1 |
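The float cube root above is numerically fragile: in IEEE-754 doubles, `27 ** (1 / 3)` evaluates to `3.0000000000000004`, so the cubed product compares unequal to 27. A rounding-based sketch avoids that:

```python
def perfect_cube(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))  # nearest integer candidate for the cube root
    return root * root * root == abs(n)

print(perfect_cube(27), perfect_cube(4))           # True False
print(perfect_cube(343), perfect_cube(1_000_000))  # True True
```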
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCAmelCase ( )-> int:
lowerCAmelCase_ : int = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
lowerCAmelCase_ : Dict = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(lowerCAmelCase_ )
DownloadCommand.register_subcommand(lowerCAmelCase_ )
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
RunCommand.register_subcommand(lowerCAmelCase_ )
ServeCommand.register_subcommand(lowerCAmelCase_ )
UserCommands.register_subcommand(lowerCAmelCase_ )
AddNewModelCommand.register_subcommand(lowerCAmelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ )
LfsCommands.register_subcommand(lowerCAmelCase_ )
PTtoTFCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
lowerCAmelCase_ : Union[str, Any] = parser.parse_args()
if not hasattr(lowerCAmelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
lowerCAmelCase_ : List[Any] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main() | 262 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCAmelCase : Tuple =False
@skip_mps
class _A ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[Any] = StableDiffusionAttendAndExcitePipeline
snake_case__ : Optional[Any] = False
snake_case__ : Dict = TEXT_TO_IMAGE_PARAMS
snake_case__ : int = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
snake_case__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , )
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase = CLIPTextModel(__lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = lowercase = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowercase = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
lowercase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
def A__ ( self ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def A__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def A__ ( self ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def A__ ( self ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def A__ ( self ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5E-4 )
def A__ ( self ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _A ( unittest.TestCase ):
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = torch.manual_seed(51 )
lowercase = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowercase = """a painting of an elephant with glasses"""
lowercase = [5, 7]
lowercase = pipe(
prompt=__lowerCAmelCase , token_indices=__lowerCAmelCase , guidance_scale=7.5 , generator=__lowerCAmelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5E-1
| 32 | """simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 32 | 1 |
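A standalone sketch of the cursor-control pattern the helpers above implement (ANSI escape codes keyed by direction): print two lines, move the cursor up one, clear that line, and rewrite it in color.

```python
import sys

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}

def force_write(content: str = "") -> None:
    sys.stdout.write(content)
    sys.stdout.flush()

force_write("first line\nsecond line\n")
force_write(f"\033[1{CURSOR_TO_CHAR['UP']}")       # move the cursor up one line
force_write("\r" + " " * 40 + "\r")                # blank out the current line
force_write("\u001b[32mreplaced line\u001b[0m\n")  # rewrite it in green
```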
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
# Initialise PyTorch model
__lowerCamelCase = LxmertConfig.from_json_file(UpperCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__lowerCamelCase = LxmertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCAmelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 67 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase__ ( unittest.TestCase ):
def __init__( self : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Dict=7 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Dict=99 ,lowerCamelCase__ : int=32 ,lowerCamelCase__ : Tuple=5 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : Any=37 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=512 ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : int=4 ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Union[str, Any] = seq_length
_UpperCamelCase : Optional[Any] = is_training
_UpperCamelCase : Optional[int] = use_attention_mask
_UpperCamelCase : Any = use_token_type_ids
_UpperCamelCase : str = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Any = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : Optional[int] = type_vocab_size
_UpperCamelCase : str = type_sequence_label_size
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : List[Any] = num_choices
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=lowerCamelCase__ ,)
return config, input_ids, attention_mask
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[Any] = config_and_inputs
_UpperCamelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase : Dict = model_class_name.from_pretrained('distilbert-base-uncased' )
_UpperCamelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
_UpperCamelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCamelCase : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase : Dict = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ )[0]
_UpperCamelCase : Any = (1, 11, 768)
self.assertEqual(output.shape ,lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,lowerCamelCase__ ,atol=1E-4 ) )
| 83 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a__: Optional[Any] = logging.get_logger(__name__)
a__: Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
a__: Optional[Any] = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
a__: Tuple = {'allegro/herbert-base-cased': 514}
a__: List[str] = {}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = HerbertTokenizer
def __init__( self,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase="<s>",__lowerCamelCase="<unk>",__lowerCamelCase="<pad>",__lowerCamelCase="<mask>",__lowerCamelCase="</s>",**__lowerCamelCase,):
super().__init__(
_SCREAMING_SNAKE_CASE,_SCREAMING_SNAKE_CASE,tokenizer_file=_SCREAMING_SNAKE_CASE,cls_token=_SCREAMING_SNAKE_CASE,unk_token=_SCREAMING_SNAKE_CASE,pad_token=_SCREAMING_SNAKE_CASE,mask_token=_SCREAMING_SNAKE_CASE,sep_token=_SCREAMING_SNAKE_CASE,**_SCREAMING_SNAKE_CASE,)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = [self.cls_token_id]
A__ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE,token_ids_a=_SCREAMING_SNAKE_CASE,already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ):
A__ = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE,name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 368 |
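The two helpers at the end of the record implement the usual single-sequence and pair layouts for special tokens; a quick illustration with placeholder ids (cls = 0, sep = 2):

```python
cls_id, sep_id = 0, 2
tokens_a, tokens_b = [11, 12], [21, 22, 23]

single = [cls_id] + tokens_a + [sep_id]            # [CLS] A [SEP]
pair = single + tokens_b + [sep_id]                # [CLS] A [SEP] B [SEP]
type_ids = [0] * len(single) + [1] * (len(tokens_b) + 1)
print(pair)      # [0, 11, 12, 2, 21, 22, 23, 2]
print(type_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]
```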
def UpperCamelCase__( UpperCamelCase__ : int = 1_00 )->int:
A__ = (n * (n + 1) // 2) ** 2
A__ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F"{solution() = }")
| 39 | 0 |
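A quick check of the identity hiding behind the variable names above: (1 + ... + n)^2 equals 1^3 + ... + n^3 (Nicomachus's theorem), so `sum_cubes` really is the squared sum, and the difference below is the Project Euler problem 6 answer for n = 100.

```python
n = 100
assert (n * (n + 1) // 2) ** 2 == sum(i ** 3 for i in range(1, n + 1))  # Nicomachus
print((n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6)  # 25164150
```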
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : List[str] = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Optional[int] = "poolformer"
def __init__( self : List[str] , A : int=3 , A : List[Any]=16 , A : str=16 , A : List[Any]=3 , A : int=4.0 , A : str=[2, 2, 6, 2] , A : Tuple=[64, 1_28, 3_20, 5_12] , A : int=[7, 3, 3, 3] , A : str=[4, 2, 2, 2] , A : Union[str, Any]=[2, 1, 1, 1] , A : List[str]=4 , A : List[str]=0.0 , A : Any="gelu" , A : List[str]=True , A : Union[str, Any]=1e-5 , A : str=0.02 , **A : List[Any] , ) -> Dict:
lowercase_ : List[str] = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Tuple = stride
lowercase_ : Tuple = padding
lowercase_ : List[str] = pool_size
lowercase_ : List[str] = hidden_sizes
lowercase_ : str = mlp_ratio
lowercase_ : Optional[Any] = depths
lowercase_ : Optional[int] = patch_sizes
lowercase_ : str = strides
lowercase_ : Union[str, Any] = num_encoder_blocks
lowercase_ : Optional[Any] = drop_path_rate
lowercase_ : int = hidden_act
lowercase_ : List[str] = use_layer_scale
lowercase_ : Dict = layer_scale_init_value
lowercase_ : List[str] = initializer_range
super().__init__(**__UpperCamelCase )
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
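# Minimal usage sketch for the config above; PoolFormerModel is the matching
# model class in transformers, and the values shown are just the defaults.
def _example_poolformer_config():
    from transformers import PoolFormerConfig, PoolFormerModel

    config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
    model = PoolFormerModel(config)  # randomly initialized weights
    return model, config.num_encoder_blocks  # 4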
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features):
    """Pick a writer batch size from the feature types so row groups stay small for media columns."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset as Parquet to a binary file handle. The caller opens and closes the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
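# A round-trip sketch: these classes back the public Dataset.to_parquet /
# Dataset.from_parquet helpers (the file path below is illustrative).
def _example_parquet_roundtrip():
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
    ds.to_parquet("example.parquet")  # goes through ParquetDatasetWriter
    ds_back = Dataset.from_parquet("example.parquet")  # goes through ParquetDatasetReader
    assert ds_back.column_names == ds.column_names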
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
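# Usage sketch with the restored method names; the checkpoint comes from the
# pretrained vocab map defined above (needs a network or cached download).
def _example_xlnet_tokenizer():
    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    ids = tokenizer("Hello world")["input_ids"]
    # XLNet appends <sep> and <cls> at the end rather than prepending them:
    assert ids[-2:] == [tokenizer.sep_token_id, tokenizer.cls_token_id]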
def solution():
    """Find the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
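# Python's big integers make the direct loop fine, but since only the last ten
# digits matter, an equivalent sketch can work modulo 10**10 with 3-argument pow:
def solution_mod() -> str:
    mod = 10**10
    # zfill keeps leading zeros that str(total)[-10:] would preserve implicitly.
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)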
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
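# This formatter is what dataset.with_format("torch") selects; a short sketch:
def _example_torch_format():
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]}).with_format("torch")
    assert ds[0]["x"].dtype.is_floating_point  # floats map to torch.float32 by default
    assert str(ds["y"].dtype) == "torch.int64"  # integers map to torch.int64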
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write the updated schema through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
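# A brief usage sketch for the template above (column names are illustrative):
def _example_image_classification_template():
    from datasets import ClassLabel, Features, Image

    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    template = ImageClassification(image_column="image", label_column="labels")
    aligned = template.align_with_features(features)
    return aligned.label_schema["labels"].names  # ['cat', 'dog']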
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """simple docstring"""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """simple docstring"""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """simple docstring"""
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """simple docstring"""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    """simple docstring"""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """simple docstring"""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    """simple docstring"""
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> List[str]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # Use CLIPSeg to segment the region described by `text` and build a mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
        )
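# Loading sketch in the spirit of the diffusers community-pipeline docs; the
# checkpoint names and image URL are illustrative, not pinned by this file.
def _example_text_inpainting():
    import PIL.Image
    import requests
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",
        custom_pipeline="text_inpainting",
        segmentation_model=model,
        segmentation_processor=processor,
    )
    image = PIL.Image.open(requests.get("https://example.com/image.png", stream=True).raw).resize((512, 512))
    return pipe(image=image, text="a glass cup", prompt="a bowl of fruit").images[0]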
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """simple docstring"""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
_a = sys.argv[1]
_a = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
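# A self-contained demo of the input structure the function expects
# (file names and benchmark keys are illustrative, not part of the original script):
def _demo_format_json_to_md():
    import os
    import tempfile

    example = {"benchmarks/map.json": {"time_map": {"new": 1.23, "old": 1.50, "diff": -0.27}}}
    with tempfile.TemporaryDirectory() as tmp:
        in_path = os.path.join(tmp, "results.json")
        out_path = os.path.join(tmp, "results.md")
        with open(in_path, "w", encoding="utf-8") as f:
            json.dump(example, f)
        format_json_to_md(in_path, out_path)
        with open(out_path, encoding="utf-8") as f:
            print(f.read())  # a collapsible <details> block with one table per benchmark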
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer; the loop exits via break once the name resolves
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.7_5, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
def binomial_coefficient(n, r):
    # c[j] holds C(i, j) for the current row i of Pascal's triangle.
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row, update right-to-left.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
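# The single-array update works because each Pascal row depends only on the
# previous one; sweeping j right-to-left reads c[j - 1] before it is updated.
# Cross-check against math.comb (available from Python 3.8):
import math

for _n in range(12):
    for _r in range(_n + 1):
        assert binomial_coefficient(_n, _r) == math.comb(_n, _r)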
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation used to normalize CLIP image embeddings."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main():
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion over every cell; exponential without memoization."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memo(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Top-down recursion with a dp_array memo; O(rows * cols) states."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up tabulation with a full (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up keeping only two rows; O(cols) extra space."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: next_row must keep the just-finished row's values
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: int , UpperCamelCase: int , UpperCamelCase: Union[str, Any]=13 , UpperCamelCase: List[Any]=7 , UpperCamelCase: Any=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: str=True , UpperCamelCase: Optional[int]=99 , UpperCamelCase: Optional[Any]=32 , UpperCamelCase: Tuple=5 , UpperCamelCase: Optional[int]=4 , UpperCamelCase: int=37 , UpperCamelCase: str="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: List[Any]=0.1 , UpperCamelCase: Tuple=5_12 , UpperCamelCase: List[str]=16 , UpperCamelCase: List[str]=2 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: List[str]=False , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]="None" , UpperCamelCase: Optional[int]=3 , UpperCamelCase: List[str]=4 , UpperCamelCase: List[str]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = relative_attention
A__ = position_biased_input
A__ = pos_att_type
A__ = scope
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: str ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
return config
def UpperCamelCase ( self: List[Any] , UpperCamelCase: str ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase ( self: Tuple , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = DebertaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )[0]
A__ = model(UpperCamelCase , token_type_ids=UpperCamelCase )[0]
A__ = model(UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: Any ):
"""simple docstring"""
A__ = DebertaForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: int , UpperCamelCase: Dict , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: str ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Tuple , UpperCamelCase: Any ):
"""simple docstring"""
A__ = DebertaForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: str ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = DebertaModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DebertaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
pass
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
A__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
A__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 363 |
"""simple docstring"""
from typing import Any
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: Any ):
"""simple docstring"""
A__ = data
A__ = None
class a :
"""simple docstring"""
def __init__( self: List[str] ):
"""simple docstring"""
A__ = None
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.head
while temp is not None:
print(temp.data , end=""" """ )
A__ = temp.next
print()
def UpperCamelCase ( self: str , UpperCamelCase: Any ):
"""simple docstring"""
A__ = Node(UpperCamelCase )
A__ = self.head
A__ = new_node
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
A__ = self.head
while node_a is not None and node_a.data != node_data_a:
A__ = node_a.next
A__ = self.head
while node_a is not None and node_a.data != node_data_a:
A__ = node_a.next
if node_a is None or node_a is None:
return
A__ , A__ = node_a.data, node_a.data
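# Minimal safety sketch (illustrative, not in the original file): swaps with a
# duplicate or missing key must leave the list unchanged.
def _demo_swap_is_safe() -> None:
    demo = LinkedList()
    for value in (3, 2, 1):
        demo.push(value)  # list is now 1 -> 2 -> 3
    demo.swap_nodes(2, 2)  # no-op: identical keys
    demo.swap_nodes(1, 99)  # no-op: 99 is absent
    demo.swap_nodes(1, 3)  # list becomes 3 -> 2 -> 1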
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Tuple = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 69 | 0 |
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
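# Illustrative aside (not in the original file): the loop in guess_the_number
# is plain binary search, so on an integer range it converges in roughly
# log2(higher - lower) probes.
def max_probes(lower: int, higher: int) -> int:
    import math

    return math.ceil(math.log2(max(higher - lower, 2)))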
if __name__ == "__main__":
main()
| 9 |
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with ``prec`` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
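# The relative import above assumes a sibling ``binary_exp_mod`` module; a
# minimal illustrative stand-in is Python's three-argument pow(), which does
# the same binary modular exponentiation:
def bin_exp_mod_sketch(a, n, b):
    return pow(a, n, b)  # (a ** n) % b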
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 101 | 0 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
UpperCAmelCase__ : List[Any] = _get_single_answer(_lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCAmelCase__ : Optional[Any] = example["""document"""]["""tokens"""]
UpperCAmelCase__ : Union[str, Any] = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(_lowerCamelCase ),
"answer": {
"start_token": -1_00, # ignore index in cross-entropy
"end_token": -1_00, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
UpperCAmelCase__ : Union[str, Any] = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
UpperCAmelCase__ : str = example["""document"""]["""tokens"""]
UpperCAmelCase__ : Any = answer["""start_token"""]
UpperCAmelCase__ : Optional[int] = answer["""end_token"""]
UpperCAmelCase__ : int = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
UpperCAmelCase__ : Dict = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
UpperCAmelCase__ : str = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
UpperCAmelCase__ : Union[str, Any] = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
UpperCAmelCase__ : Dict = """ """.join([old[i] for i in range(len(_lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , _lowerCamelCase , end="""\n""" )
print("""Old:""" , _lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(_lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
UpperCAmelCase__ : List[Any] = get_context_and_ans(_lowerCamelCase , assertion=_lowerCamelCase )
UpperCAmelCase__ : Dict = out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
UpperCAmelCase__ : List[Any] = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
UpperCAmelCase__ : Optional[int] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCAmelCase__ : int = []
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[int] = input_ids[:q_len]
UpperCAmelCase__ : Dict = range(_lowerCamelCase , len(_lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
UpperCAmelCase__ : Dict = i + max_length - q_len
UpperCAmelCase__ : Tuple = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_00] * len(_lowerCamelCase ),
"end_token": [-1_00] * len(_lowerCamelCase ),
"category": category,
},
}
UpperCAmelCase__ : Optional[int] = out["""context"""].split()
UpperCAmelCase__ : Optional[int] = splitted_context[answer["""end_token"""]]
UpperCAmelCase__ : Any = len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=_lowerCamelCase , ).input_ids )
UpperCAmelCase__ : Tuple = len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=_lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
UpperCAmelCase__ : Tuple = len(tokenizer(_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
UpperCAmelCase__ : Union[str, Any] = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
UpperCAmelCase__ : Optional[int] = answer["""start_token"""]
UpperCAmelCase__ : List[Any] = answer["""end_token"""]
if assertion:
UpperCAmelCase__ : Any = tokenizer.decode(_lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , _lowerCamelCase , end="""\n\n""" )
if len(_lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
UpperCAmelCase__ : List[Any] = input_ids[:q_len]
UpperCAmelCase__ : Tuple = range(_lowerCamelCase , len(_lowerCamelCase ) , max_length - doc_stride )
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Tuple = [] # null, yes, no, long, short
for i in doc_start_indices:
UpperCAmelCase__ : Dict = i + max_length - q_len
UpperCAmelCase__ : Union[str, Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
UpperCAmelCase__ : str = start_token - i + q_len
UpperCAmelCase__ : int = end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
UpperCAmelCase__ : Optional[Any] = -1_00
UpperCAmelCase__ : Optional[Any] = -1_00
answers_category.append("""null""" )
UpperCAmelCase__ : List[str] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_lowerCamelCase )
answers_end_token.append(_lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(_lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(_lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
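# Illustrative aside (not part of the original script): with the defaults above
# (max_length=4096, doc_stride=2048), consecutive context windows start
# (max_length - doc_stride) tokens apart, mirroring the range() calls used in
# get_strided_contexts_and_ans.
def window_starts(total_len, q_len, max_length=4096, doc_stride=2048):
    return list(range(q_len, total_len, max_length - doc_stride))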
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
UpperCAmelCase__ : Union[str, Any] = get_strided_contexts_and_ans(
_lowerCamelCase , _lowerCamelCase , doc_stride=_lowerCamelCase , max_length=_lowerCamelCase , assertion=_lowerCamelCase , )
return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
                if start == -1 and end == -1:
                    continue  # skip samples that have no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the "null" samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 363 |
"""simple docstring"""
_A = range(2, 20 + 1)
_A = [10**k for k in range(ks[-1] + 1)]
_A = {}
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
UpperCAmelCase__ : List[str] = sum(a_i[j] for j in range(lowerCAmelCase , len(lowerCAmelCase ) ) )
UpperCAmelCase__ : str = sum(a_i[j] * base[j] for j in range(min(len(lowerCAmelCase ) , lowerCAmelCase ) ) )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = 0, 0
UpperCAmelCase__ : Optional[Any] = n - i
UpperCAmelCase__ : Union[str, Any] = memo.get(lowerCAmelCase )
if sub_memo is not None:
UpperCAmelCase__ : Any = sub_memo.get(lowerCAmelCase )
if jumps is not None and len(lowerCAmelCase ) > 0:
# find and make the largest jump without going over
UpperCAmelCase__ : Optional[int] = -1
for _k in range(len(lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCAmelCase__ : str = _k
break
if max_jump >= 0:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCAmelCase__ : Any = diff + c
for j in range(min(lowerCAmelCase , len(lowerCAmelCase ) ) ):
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = divmod(lowerCAmelCase , 10 )
if new_c > 0:
add(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
UpperCAmelCase__ : int = []
else:
UpperCAmelCase__ : Union[str, Any] = {c: []}
UpperCAmelCase__ : Union[str, Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = next_term(lowerCAmelCase , k - 1 , i + dn , lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = compute(lowerCAmelCase , lowerCAmelCase , i + dn , lowerCAmelCase )
diff += _diff
dn += terms_jumped
UpperCAmelCase__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCAmelCase__ : Any = 0
while j < len(lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
if i >= n:
return 0, i
if k > len(lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCAmelCase__ : Tuple = i
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = 0, 0, 0
for j in range(len(lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCAmelCase__ : Dict = ds_c + ds_b
diff += addend
UpperCAmelCase__ : Tuple = 0
for j in range(lowerCAmelCase ):
UpperCAmelCase__ : Tuple = a_i[j] + addend
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = divmod(lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return diff, i - start_i
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
for j in range(lowerCAmelCase , len(lowerCAmelCase ) ):
UpperCAmelCase__ : Optional[Any] = digits[j] + addend
if s >= 10:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = divmod(lowerCAmelCase , 10 )
UpperCAmelCase__ : Any = addend // 10 + quotient
else:
UpperCAmelCase__ : Optional[Any] = s
UpperCAmelCase__ : Tuple = addend // 10
if addend == 0:
break
while addend > 0:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = divmod(lowerCAmelCase , 10 )
digits.append(lowerCAmelCase )
def a__ ( lowerCAmelCase = 10**15 ) -> int:
UpperCAmelCase__ : Optional[int] = [1]
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Dict = 0
while True:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = next_term(lowerCAmelCase , 20 , i + dn , lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
UpperCAmelCase__ : Optional[int] = 0
for j in range(len(lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
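# Brute-force cross-check (illustrative only, feasible for small n): the
# sequence satisfies a(1) = 1 and a(k + 1) = a(k) + digitsum(a(k)).
def brute_force_a_n(n):
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a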
if __name__ == "__main__":
print(f'''{solution() = }''')
| 166 | 0 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """Hill cipher over the 36-character alphanumeric alphabet."""

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        transform = (
            det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(transform))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
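# Illustrative aside (not part of the original file): the det_inv search in
# make_decrypt_key is a brute-force modular inverse; on Python 3.8+ the same
# value comes from the built-in pow.
def modular_inverse_sketch(det: int, modulus: int = 36) -> int:
    return pow(det, -1, modulus)  # raises ValueError if det and modulus share a factor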
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 111 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    # Class and argument names reconstructed from behaviour (resize down to a
    # multiple of size_divisor, rescale by 1/255); this mirrors transformers'
    # GLPN image processor, so treat the exact names and order as assumptions.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size_divisor=32,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
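# Illustrative arithmetic check (not part of the class above): rounding
# 513 x 767 down to multiples of size_divisor=32 gives 512 x 736.
assert (513 // 32 * 32, 767 // 32 * 32) == (512, 736)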
| 201 | 0 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
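# Numeric companion (illustrative, not in the original file): the n-th partial
# sum of the series rendered above.
def harmonic_sum(n: int) -> float:
    return sum(1 / k for k in range(1, n + 1))


assert abs(harmonic_sum(3) - (1 + 1 / 2 + 1 / 3)) < 1e-12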
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 81 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 81 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase (A__ ,unittest.TestCase ):
lowerCamelCase__ : Tuple = ShapEPipeline
lowerCamelCase__ : Union[str, Any] = ['prompt']
lowerCamelCase__ : Optional[Any] = ['prompt']
lowerCamelCase__ : str = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowerCamelCase__ : List[Any] = False
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
return 8
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 1_6,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 3_2,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
SCREAMING_SNAKE_CASE__ = PriorTransformer(**__UpperCAmelCase )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""param_shapes""": (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 1_2,
"""background""": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ = ShapERenderer(**__UpperCAmelCase )
return model
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.dummy_prior
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_renderer
SCREAMING_SNAKE_CASE__ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_0_2_4 , prediction_type="""sample""" , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=0 ) -> Any:
if str(__UpperCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 3_2,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = output.images[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = torch_device == """cpu"""
SCREAMING_SNAKE_CASE__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
SCREAMING_SNAKE_CASE__ = ShapEPipeline.from_pretrained("""openai/shap-e""" )
SCREAMING_SNAKE_CASE__ = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
"""a shark""" , generator=__UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="""np""" , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 165 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
A_ : Dict = logging.get_logger(__name__)
A_ : Any = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"
def __init__( self : Any , __UpperCAmelCase : Optional[Any]=1_2_8_1_0_0 , __UpperCAmelCase : Optional[Any]=1_5_3_6 , __UpperCAmelCase : List[Any]=2_4 , __UpperCAmelCase : str=2_4 , __UpperCAmelCase : Optional[int]=6_1_4_4 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[Any]=5_1_2 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : Any=1e-7 , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Any=-1 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : Union[str, Any]="gelu" , **__UpperCAmelCase : Any , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = relative_attention
SCREAMING_SNAKE_CASE__ = max_relative_positions
SCREAMING_SNAKE_CASE__ = pad_token_id
SCREAMING_SNAKE_CASE__ = position_biased_input
# Backwards compatibility
if type(__UpperCAmelCase ) == str:
SCREAMING_SNAKE_CASE__ = [x.strip() for x in pos_att_type.lower().split("""|""" )]
SCREAMING_SNAKE_CASE__ = pos_att_type
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kwargs.get("""pooler_hidden_size""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pooler_dropout
SCREAMING_SNAKE_CASE__ = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 165 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 368 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
| 30 | 0 |
g: float = 9.80665
def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
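# A minimal usage sketch (values are illustrative, not from the original file):
# the buoyant force on a fully submerged 0.5 m^3 object in fresh water
# (density ~997 kg/m^3) under standard gravity.
#     archimedes_principle(fluid_density=997.0, volume=0.5)  # ~4888.6 N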
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 193 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        """simple docstring"""
        self.connections = {}

    def add_node(self, node: str):
        """simple docstring"""
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        """simple docstring"""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        """simple docstring"""
        return list(self.connections)

    def transition(self, node: str):
        """simple docstring"""
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def snake_case_(start: str, transitions: list[tuple[str, str, float]], steps: int):
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
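# A minimal usage sketch (the transition table is illustrative, not from the
# original file): a two-state chain sampled for 1000 steps.
#     transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#                    ("b", "a", 0.5), ("b", "b", 0.5)]
#     counts = snake_case_("a", transitions, 1000)  # Counter of visits per state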
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = 3_2
lowercase_ = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=3_2 , size=3_2)
torch.manual_seed(0)
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCAmelCase_ , projection_dim=lowerCAmelCase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , ))
# regular denoising components
torch.manual_seed(0)
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase_)
lowercase_ = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""")
torch.manual_seed(0)
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
torch.manual_seed(0)
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ))
torch.manual_seed(0)
lowercase_ = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase_ , layers_per_block=1 , upcast_attention=lowerCAmelCase_ , use_linear_projection=lowerCAmelCase_ , )
torch.manual_seed(0)
lowercase_ = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0)
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : List[str]=True):
"""simple docstring"""
if str(lowerCAmelCase_).startswith("""mps"""):
lowercase_ = torch.manual_seed(lowerCAmelCase_)
else:
lowercase_ = torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
lowercase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
if pil_image:
lowercase_ = input_image * 0.5 + 0.5
lowercase_ = input_image.clamp(0 , 1)
lowercase_ = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
lowercase_ = DiffusionPipeline.numpy_to_pil(lowerCAmelCase_)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**lowerCAmelCase_)
lowercase_ = sd_pipe.to(lowerCAmelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = self.get_dummy_inputs(lowerCAmelCase_)
inputs.update({"""image_embeds""": None})
lowercase_ = sd_pipe(**lowerCAmelCase_).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase_ = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase_)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase_)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase_)
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""")
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""")
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa)
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="""cpu""").manual_seed(0)
lowercase_ = pipe(lowerCAmelCase_ , """anime turle""" , generator=lowerCAmelCase_ , output_type="""np""")
lowercase_ = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""")
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""")
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa)
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="""cpu""").manual_seed(0)
lowercase_ = pipe(lowerCAmelCase_ , """anime turle""" , generator=lowerCAmelCase_ , output_type="""np""")
lowercase_ = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""")
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa)
lowercase_ = pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
lowerCAmelCase_ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 359 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    '''simple docstring'''
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    '''simple docstring'''
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash):
        """simple docstring"""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    '''simple docstring'''
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10_000), chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    '''simple docstring'''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
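# A minimal usage sketch (the strings are illustrative): the score is the
# token-set overlap of the two snippets, in [0, 1].
#     jaccard_similarity("def f(x): return x + 1", "def g(x): return x + 1")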
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
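# A minimal usage sketch (dataset contents are assumptions: any
# `datasets.Dataset` with "content", "repo_name" and "path" columns works;
# note that documents shorter than MIN_NUM_TOKENS tokens get no MinHash and
# are never clustered):
#     from datasets import Dataset
#     ds = Dataset.from_dict({
#         "content": ["some long enough code ..."] * 3,
#         "repo_name": ["r1", "r2", "r3"],
#         "path": ["a.py", "b.py", "c.py"],
#     })
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)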
| 313 | 0 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
'''simple docstring'''
if num < 0:
raise ValueError("Number should not be negative.")
return 1 if num in (0, 1) else num * factorial(num - 1)
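# Thanks to @lru_cache, repeated calls reuse memoized subresults, e.g.:
#     factorial(5)   # 120, computes 5..1 once
#     factorial(6)   # 720, reuses the cached factorial(5)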
if __name__ == "__main__":
import doctest
doctest.testmod() | 232 |
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    '''simple docstring'''
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f'Input values must either be float or int: {list(locals().values())}'
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    '''simple docstring'''
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }")
print(f"{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }") | 232 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    '''simple docstring'''
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        # truncate to the target length before padding
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(truncated)] = truncated
        else:
            # left padding: align the (truncated) sequence to the right edge
            out_tensor[i, sequence_length - len(truncated) :] = truncated
    return out_tensor.tolist()
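# A minimal usage sketch (values are illustrative): pad two ragged label rows
# to length 4 with -1, padding on the right.
#     padding_tensor([[7, 8], [9]], -1, "right", 4)
#     # -> [[7, 8, -1, -1], [9, -1, -1, -1]]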
def is_punctuation(char) -> bool:
    '''simple docstring'''
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P'):
        return True
    return False
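# Quick checks: is_punctuation("!") is True (ASCII range), is_punctuation("a")
# is False, and is_punctuation("\u00bf") is True via the Unicode "P" category.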
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt' if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['entity_ids']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature['ner_tags'] for feature in features]
        batch['ner_tags'] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature['original_entity_spans'] for feature in features]
        batch['original_entity_spans'] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 104 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a_ : Optional[Any] = TypeVar("T")
def get_parent_position(position: int) -> int:
    '''simple docstring'''
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    '''simple docstring'''
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    '''simple docstring'''
    return (2 * position) + 2
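# In this implicit array layout of a binary heap, node 3 has parent
# (3 - 1) // 2 == 1 and children 2 * 3 + 1 == 7 and 2 * 3 + 2 == 8:
#     get_parent_position(3)       # 1
#     get_child_left_position(3)   # 7
#     get_child_right_position(3)  # 8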
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T], ) -> tuple[dict[T, int], dict[T, T | None]]:
    '''simple docstring'''
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
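# A minimal usage sketch (the edges are illustrative): `dist` holds the weight
# of the MST edge reaching each node, `parent` its MST predecessor.
#     graph = GraphUndirectedWeighted[str]()
#     graph.add_edge("a", "b", 3)
#     graph.add_edge("b", "c", 10)
#     graph.add_edge("c", "a", 5)
#     dist, parent = prims_algo(graph)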
| 104 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = XLMProphetNetTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def SCREAMING_SNAKE_CASE_( self ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = XLMProphetNetTokenizer(lowercase , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = "[PAD]"
lowerCamelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase ) , 1012 )
def SCREAMING_SNAKE_CASE_( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = XLMProphetNetTokenizer(lowercase , keep_accents=lowercase )
lowerCamelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = "Hello World!"
lowerCamelCase_ = [35389, 6672, 49, 2]
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
# fmt: off
lowerCamelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 19 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = CodeGenTokenizer
lowercase__ = CodeGenTokenizerFast
lowercase__ = True
lowercase__ = {"add_prefix_space": True}
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_snake_case : List[Any] = {"""unk_token""": """<unk>"""}
_snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def UpperCamelCase_ ( self: Any, **a_: int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Any, **a_: str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = """lower newer"""
_snake_case : Tuple = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
_snake_case : Optional[Any] = """lower newer"""
_snake_case : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_snake_case : int = tokenizer.tokenize(a_, add_prefix_space=a_ )
self.assertListEqual(a_, a_ )
_snake_case : str = tokens + [tokenizer.unk_token]
_snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : int = self.get_tokenizer()
_snake_case : int = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : Dict = """lower newer"""
# Testing tokenization
_snake_case : Dict = tokenizer.tokenize(a_, add_prefix_space=a_ )
_snake_case : List[str] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids without special tokens
_snake_case : Optional[Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : Tuple = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids with special tokens
_snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : int = tokenizer.encode(a_, add_prefix_space=a_ )
_snake_case : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
# Testing the unknown token
_snake_case : Tuple = tokens + [rust_tokenizer.unk_token]
_snake_case : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Dict, *a_: Dict, **a_: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int, a_: List[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
# Simple input
_snake_case : Any = """This is a simple input"""
_snake_case : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""")
_snake_case : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
# Pair input
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" )
# Simple input
_snake_case : List[Any] = """This is a simple input"""
_snake_case : int = ["""This is a simple input looooooooong""", """This is a simple input"""]
_snake_case : Any = ("""This is a simple input""", """This is a pair""")
_snake_case : str = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_snake_case : str = tokenizer.pad_token_id
_snake_case : Optional[int] = tokenizer(a_, padding="""max_length""", max_length=30, return_tensors="""np""" )
_snake_case : Dict = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
_snake_case : Tuple = tokenizer(*a_, padding="""max_length""", max_length=60, return_tensors="""np""" )
_snake_case : Optional[Any] = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = """$$$"""
_snake_case : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=a_, add_bos_token=a_ )
_snake_case : str = """This is a simple input"""
_snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Union[str, Any] = tokenizer.bos_token_id
_snake_case : Tuple = tokenizer(a_ )
_snake_case : Optional[Any] = tokenizer(a_ )
self.assertEqual(out_s.input_ids[0], a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_snake_case : Optional[int] = tokenizer.decode(out_s.input_ids )
_snake_case : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_snake_case : Dict = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_snake_case : Union[str, Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_snake_case : Optional[Any] = tokenizer.encode(a_ )
_snake_case : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_snake_case : Optional[Any] = tokenizer.decode(a_, truncate_before_pattern=a_ )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
| 64 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6_37_81_37


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced latitudes correct for the ellipsoidal shape of the Earth
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
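# Sanity check (follows directly from the formula above): a quarter turn along
# the equator, where the flattening correction vanishes.
#     haversine_distance(0.0, 0.0, 0.0, 90.0)  # ~10_018_754 m, i.e. RADIUS * pi / 2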
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 |
def prime_sieve_eratosthenes(num: int):
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
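# Quick check: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].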
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 109 | 1 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    '''simple docstring'''

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 45 |
"""simple docstring"""
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowercase ( lowerCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(lowerCAmelCase__ )
__a = ''''''.join(bin(lowerCAmelCase__ )[2:].zfill(8 ) for byte in data )
__a = len(lowerCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__a = b'''=''' * ((6 - len(lowerCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase__ ) % 6)
else:
__a = b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
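# Round-trip check (standard Base64, so the values are verifiable):
#     base64_encode(b"Hello")                  # b"SGVsbG8="
#     base64_decode(base64_encode(b"Hello"))   # b"Hello"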
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_00_00_00) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
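# For the default ceiling of 1_000_000 this is Project Euler problem 50; the
# published answer is 997651, the sum of 543 consecutive primes starting at 7.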
if __name__ == "__main__":
print(f'{solution() = }')
| 230 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    model_type = '''lilt'''

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
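# A minimal usage sketch: instantiate the config with defaults, overriding a
# single field.
#     config = LiltConfig(channel_shrink_ratio=2)
#     config.max_2d_position_embeddings  # 1024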
| 230 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( SchedulerCommonTest ):
__lowerCAmelCase : Dict = (DDPMScheduler,)
def __lowerCamelCase ( self :Tuple ,**__lowercase :Union[str, Any] ):
snake_case__ : str = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_larger_than_num_train_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
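# --- Added usage sketch (illustrative; tensor shapes are made up): one denoising
# step with the scheduler exercised by the tests above.
def demo_ddpm_step() -> None:
    scheduler = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    noise_pred = torch.randn(1, 3, 8, 8)
    t = scheduler.timesteps[0]
    prev = scheduler.step(noise_pred, t, sample, generator=torch.manual_seed(0)).prev_sample
    assert prev.shape == sample.shape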
| 230 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"],)
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config, **kwargs,)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
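# --- Added usage sketch (illustrative): round-tripping the default config
# through a dict, which exercises the custom to_dict defined above.
def demo_mask2former_config() -> None:
    config = Mask2FormerConfig()  # falls back to the default Swin backbone
    as_dict = config.to_dict()
    assert as_dict["model_type"] == "mask2former"
    assert as_dict["backbone_config"]["model_type"] == "swin"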
| 32 | 0 |
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """Finds the minimal left-to-right path sum in the matrix, moving only up, down and right."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'''{solution() = }''')
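# --- Added check (illustrative): the same three-direction DP on a 3x3 matrix
# where the cheapest left-to-right path is obviously 1 -> 2 -> 3 = 6.
def demo_matrix_min_path() -> None:
    matrix = [[1, 2, 3], [9, 9, 9], [9, 9, 9]]
    rows, cols = len(matrix), len(matrix[0])
    dp = [[matrix[i][0]] + [0] * (cols - 1) for i in range(rows)]
    for j in range(1, cols):
        for i in range(rows):  # move right
            dp[i][j] = dp[i][j - 1] + matrix[i][j]
        for i in range(1, rows):  # then relax moving down
            dp[i][j] = min(dp[i][j], dp[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # then relax moving up
            dp[i][j] = min(dp[i][j], dp[i + 1][j] + matrix[i][j])
    assert min(row[-1] for row in dp) == 6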
| 5 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],)
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
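# --- Added illustration (hypothetical checkpoint key): the regex-based layer
# renaming performed by convert_state_dict above.
def demo_rwkv_rename() -> None:
    renamed = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", "blocks.3.att.key.weight")
    assert renamed == "blocks.3.attention.key.weight"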
| 5 | 1 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Naive Boyer-Moore search using only the bad character heuristic."""

    def __init__(self, text, pattern):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char):
        """Finds the rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos):
        """Finds the rightmost mismatching position in the current text window, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        # searches the pattern in the text and returns the index positions of full matches
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
SCREAMING_SNAKE_CASE__ = "ABAABA"
SCREAMING_SNAKE_CASE__ = "AB"
SCREAMING_SNAKE_CASE__ = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE__ = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
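# --- Added check (illustrative inputs): the heuristic reports every full-match
# position, e.g. "GEEK" occurs at indices 0 and 10 in "GEEKS FOR GEEKS".
def demo_boyer_moore() -> None:
    finder = BoyerMooreSearch("GEEKS FOR GEEKS", "GEEK")
    assert finder.bad_character_heuristic() == [0, 10]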
| 150 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, crop_pct=0.9, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs,) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = PILImageResampling.BICUBIC ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
lowercase_ : Any = get_size_dict(__UpperCamelCase ,default_to_square=__UpperCamelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
lowercase_ : Union[str, Any] = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowercase_ : Tuple = int(size['height'] / crop_pct )
else:
lowercase_ : Dict = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(__UpperCamelCase ) )
lowercase_ : int = get_resize_output_image_size(__UpperCamelCase ,size=__UpperCamelCase ,default_to_square=__UpperCamelCase )
else:
if "shortest_edge" in size:
lowercase_ : Optional[int] = get_resize_output_image_size(__UpperCamelCase ,size=size['shortest_edge'] ,default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
lowercase_ : Dict = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(__UpperCamelCase ) )
return resize(__UpperCamelCase ,size=__UpperCamelCase ,resample=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
lowercase_ : List[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCamelCase ,size=(size['height'], size['width']) ,data_format=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> str:
'''simple docstring'''
return rescale(__UpperCamelCase ,scale=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
return normalize(__UpperCamelCase ,mean=__UpperCamelCase ,std=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = ChannelDimension.FIRST ,**__UpperCamelCase ,) -> PIL.Image.Image:
'''simple docstring'''
lowercase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
lowercase_ : List[str] = resample if resample is not None else self.resample
lowercase_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : str = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : str = image_mean if image_mean is not None else self.image_mean
lowercase_ : Tuple = image_std if image_std is not None else self.image_std
lowercase_ : Optional[Any] = size if size is not None else self.size
lowercase_ : Tuple = get_size_dict(__UpperCamelCase ,default_to_square=__UpperCamelCase )
lowercase_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : List[str] = get_size_dict(__UpperCamelCase ,param_name='crop_size' )
lowercase_ : str = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowercase_ : Optional[Any] = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
lowercase_ : str = [self.resize(image=__UpperCamelCase ,size=__UpperCamelCase ,crop_pct=__UpperCamelCase ,resample=__UpperCamelCase ) for image in images]
if do_center_crop:
lowercase_ : str = [self.center_crop(image=__UpperCamelCase ,size=__UpperCamelCase ) for image in images]
if do_rescale:
lowercase_ : Any = [self.rescale(image=__UpperCamelCase ,scale=__UpperCamelCase ) for image in images]
if do_normalize:
lowercase_ : int = [self.normalize(image=__UpperCamelCase ,mean=__UpperCamelCase ,std=__UpperCamelCase ) for image in images]
lowercase_ : Dict = [to_channel_dimension_format(__UpperCamelCase ,__UpperCamelCase ) for image in images]
lowercase_ : Any = {'pixel_values': images}
return BatchFeature(data=__UpperCamelCase ,tensor_type=__UpperCamelCase )
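# --- Added note (standalone arithmetic, no HF imports needed): with
# shortest_edge=224 and crop_pct=0.9, the resize step above first scales the
# short side to int(224 / 0.9) = 248, then the center crop brings it back to 224.
def demo_crop_pct() -> None:
    shortest_edge, crop_pct = 224, 0.9
    assert int(shortest_edge / crop_pct) == 248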
| 213 | 0 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt' , type=str , default='microsoft/unixcoder-base-nine' )
    parser.add_argument('--num_epochs' , type=int , default=5 )
    parser.add_argument('--batch_size' , type=int , default=6 )
    parser.add_argument('--gradient_accumulation_steps' , type=int , default=1 )
    parser.add_argument('--freeze' , type=bool , default=True )
    parser.add_argument('--learning_rate' , type=float , default=5e-4 )
    parser.add_argument('--seed' , type=int , default=0 )
    parser.add_argument('--lr_scheduler_type' , type=str , default='cosine' )
    parser.add_argument('--num_warmup_steps' , type=int , default=10 )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--output_dir' , type=str , default='./results' )
    return parser.parse_args()
metric = load('accuracy')
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
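# --- Added illustration (made-up logits): compute_metrics receives raw logits
# plus integer labels and reduces them with the accuracy metric loaded above.
def demo_compute_metrics() -> None:
    logits = np.array([[0.1, 2.0], [3.0, 0.2]])
    labels = np.array([1, 0])
    assert compute_metrics((logits, labels)) == {"accuracy": 1.0}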
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train')
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed )

    dataset = load_dataset('codeparrot/codecomplex' , split='train' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['test'].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        } )

    print('Loading tokenizer and model' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )

    def tokenize(example):
        inputs = tokenizer(example['src'] , truncation=True , max_length=10_24 )
        label = labels.str2int(example['complexity'] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['train'].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )

    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )

    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )

    print('Training...' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main() | 190 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def _dump_articles(path: Path, articles: list) -> None:
    content = '\n'.join(articles)
    Path(path).open('w' ).writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f'{split}.source' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , f'{split}.target' ) , SUMMARIES )
    return tmp_dir
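# --- Added illustration (hypothetical temp dir): the helpers above write one
# example per line into {train,val,test}.{source,target} files.
def demo_make_test_data_dir() -> None:
    import tempfile

    data_dir = make_test_data_dir(tmp_dir=tempfile.mkdtemp())
    first_line = Path(data_dir, "train.source").read_text().splitlines()[0]
    assert first_line.endswith("Sam ate lunch today.")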
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__A : int = max(len(tokenizer.encode(_UpperCAmelCase)) for a in ARTICLES)
__A : str = max(len(tokenizer.encode(_UpperCAmelCase)) for a in SUMMARIES)
__A : Dict = 4
__A : Optional[Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
__A : List[str] = SeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=_UpperCAmelCase , max_target_length=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , )
__A : Any = DataLoader(_UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__A : Optional[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__A : Tuple = max(len(tokenizer.encode(_UpperCAmelCase)) for a in ARTICLES)
__A : Any = max(len(tokenizer.encode(_UpperCAmelCase)) for a in SUMMARIES)
__A : Optional[int] = 4
__A : Any = LegacySeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=20 , max_target_length=_UpperCAmelCase , )
__A : Union[str, Any] = DataLoader(_UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')
__A : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
__A : List[str] = tmp_dir.joinpath('train.source').open().readlines()
__A : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(_UpperCAmelCase , _UpperCAmelCase , 128 , _UpperCAmelCase)
__A : Dict = {x.name for x in tmp_dir.iterdir()}
__A : Dict = {x.name for x in save_dir.iterdir()}
__A : str = save_dir.joinpath('train.source').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_UpperCAmelCase) < len(_UpperCAmelCase)
assert len(_UpperCAmelCase) == 1
assert len(packed_examples[0]) == sum(len(_UpperCAmelCase) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
__A ,__A ,__A : List[Any] = self._get_dataset(max_len=64)
__A : Union[str, Any] = 64
__A : List[Any] = ds.make_dynamic_sampler(_UpperCAmelCase , required_batch_size_multiple=_UpperCAmelCase)
__A : Union[str, Any] = [len(_UpperCAmelCase) for x in batch_sampler]
assert len(set(_UpperCAmelCase)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_UpperCAmelCase) == len(_UpperCAmelCase) # no dropped or added examples
__A : List[Any] = DataLoader(_UpperCAmelCase , batch_sampler=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2)
__A : Optional[int] = []
__A : Tuple = []
for batch in data_loader:
__A : Optional[int] = batch['input_ids'].shape
__A : Any = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__A : Tuple = np.product(batch['input_ids'].shape)
num_src_per_batch.append(_UpperCAmelCase)
if num_src_tokens > (max_tokens * 1.1):
failures.append(_UpperCAmelCase)
assert num_src_per_batch[0] == max(_UpperCAmelCase)
if failures:
raise AssertionError(F'too many tokens in {len(_UpperCAmelCase)} batches')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A ,__A : Optional[int] = self._get_dataset(max_len=512)
__A : Optional[int] = 2
__A : Dict = ds.make_sortish_sampler(_UpperCAmelCase , shuffle=_UpperCAmelCase)
__A : Tuple = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2)
__A : Union[str, Any] = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=_UpperCAmelCase)
__A : str = tokenizer.pad_token_id
def count_pad_tokens(_UpperCAmelCase , _UpperCAmelCase="input_ids"):
return [batch[k].eq(_UpperCAmelCase).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_UpperCAmelCase , k='labels')) < sum(count_pad_tokens(_UpperCAmelCase , k='labels'))
assert sum(count_pad_tokens(_UpperCAmelCase)) < sum(count_pad_tokens(_UpperCAmelCase))
assert len(_UpperCAmelCase) == len(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=1000 , _UpperCAmelCase=128):
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , _UpperCAmelCase):
__A : Dict = 'examples/seq2seq/wmt_en_ro'
__A : Any = max_len * 2 * 64
if not Path(_UpperCAmelCase).joinpath('train.len').exists():
save_len_file(_UpperCAmelCase , _UpperCAmelCase)
else:
__A : int = 'examples/seq2seq/test_data/wmt_en_ro'
__A : Any = max_len * 4
save_len_file(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : Optional[int] = SeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=_UpperCAmelCase , max_target_length=_UpperCAmelCase , n_obs=_UpperCAmelCase , )
return ds, max_tokens, tokenizer
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A ,__A : Tuple = self._get_dataset()
__A : Optional[int] = set(DistributedSortishSampler(_UpperCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=_UpperCAmelCase))
__A : List[str] = set(DistributedSortishSampler(_UpperCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=_UpperCAmelCase))
assert idsa.intersection(_UpperCAmelCase) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , use_fast=_UpperCAmelCase)
if tok_name == MBART_TINY:
__A : Dict = SeqaSeqDataset(
_UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
__A : List[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__A : Any = SeqaSeqDataset(
_UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='train' , max_source_length=4 , max_target_length=8 , )
__A : List[str] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_UpperCAmelCase) == 1 if tok_name == BART_TINY else len(_UpperCAmelCase) == 0 | 190 | 1 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
a__ : Union[str, Any] = text_generator("""This is a test""" , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
a__ : Dict = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__lowercase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
a__ : List[str] = text_generator("""This is a test""" , do_sample=__lowercase , num_return_sequences=2 , return_tensors=__lowercase )
self.assertEqual(
__lowercase , [
{"""generated_token_ids""": ANY(__lowercase )},
{"""generated_token_ids""": ANY(__lowercase )},
] , )
a__ : List[Any] = text_generator.model.config.eos_token_id
a__ : List[str] = """<pad>"""
a__ : int = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowercase , )
self.assertEqual(
__lowercase , [
[
{"""generated_token_ids""": ANY(__lowercase )},
{"""generated_token_ids""": ANY(__lowercase )},
],
[
{"""generated_token_ids""": ANY(__lowercase )},
{"""generated_token_ids""": ANY(__lowercase )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Optional[int] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
a__ : Dict = text_generator("""This is a test""" , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
a__ : int = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> str:
"""simple docstring"""
a__ : Any = TextGenerationPipeline(model=__lowercase , tokenizer=__lowercase )
return text_generator, ["This is a test", "Another test"]
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Optional[Any] = """Hello I believe in"""
a__ : Optional[Any] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
a__ : Tuple = text_generator(__lowercase )
self.assertEqual(
__lowercase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
a__ : Any = text_generator(__lowercase , stop_sequence=""" fe""" )
self.assertEqual(__lowercase , [{"""generated_text""": """Hello I believe in fe"""}] )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : Optional[int] = text_generator.model
a__ : List[str] = text_generator.tokenizer
a__ : Optional[Any] = text_generator("""This is a test""" )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
a__ : Tuple = text_generator("""This is a test""" , return_full_text=__lowercase )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
a__ : List[str] = pipeline(task="""text-generation""" , model=__lowercase , tokenizer=__lowercase , return_full_text=__lowercase )
a__ : Tuple = text_generator("""This is a test""" )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
a__ : str = text_generator("""This is a test""" , return_full_text=__lowercase )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
a__ : Union[str, Any] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
[{"""generated_text""": ANY(__lowercase )}, {"""generated_text""": ANY(__lowercase )}],
[{"""generated_text""": ANY(__lowercase )}, {"""generated_text""": ANY(__lowercase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
a__ : List[str] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
[{"""generated_text""": ANY(__lowercase )}, {"""generated_text""": ANY(__lowercase )}],
[{"""generated_text""": ANY(__lowercase )}, {"""generated_text""": ANY(__lowercase )}],
] , )
with self.assertRaises(__lowercase ):
a__ : Any = text_generator("""test""" , return_full_text=__lowercase , return_text=__lowercase )
with self.assertRaises(__lowercase ):
a__ : str = text_generator("""test""" , return_full_text=__lowercase , return_tensors=__lowercase )
with self.assertRaises(__lowercase ):
a__ : Any = text_generator("""test""" , return_text=__lowercase , return_tensors=__lowercase )
            # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
a__ : List[Any] = text_generator("""""" )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
a__ : Tuple = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0 )
a__ : Any = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(__lowercase ):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe("""This is a test""" )
        self.assertEqual(
            outputs , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe("""This is a test""" )
        self.assertEqual(
            outputs , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
        outputs = pipe("""This is a test""" )
        self.assertEqual(
            outputs , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
import torch
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.float16 )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
import torch
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.float16 )
pipe("""This is a test""" , do_sample=__lowercase , top_p=0.5 )
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("""transformers.generation.tf_utils""" )
        else:
            logger = logging.get_logger("""transformers.generation.utils""" )
        logger_msg = """Both `max_new_tokens`"""  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=1_0 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )

        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )

        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=1_0 )
        self.assertNotIn(logger_msg , cl.out )
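# --- Added usage sketch (illustrative): a minimal standalone call of the pipeline
# under test; the generated text is model-dependent, so only the structure is checked.
def demo_text_generation() -> None:
    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    out = generator("Hello", do_sample=False, max_new_tokens=5)
    assert isinstance(out[0]["generated_text"], str)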
| 170 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composite numbers which do not follow the conjecture."""
    if not isinstance(n , int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the first odd composite that cannot be written as a prime plus twice a square."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'{solution() = }')
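# --- Added worked check (illustrative): 33 = 2 * 1**2 + 31 with 31 prime, so 33
# satisfies the conjecture; the first counterexample found above is 5777.
def demo_goldbach_other() -> None:
    assert is_prime(33 - 2 * 1 * 1)
    assert compute_nums(1) == [5777]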
| 170 | 1 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes , edges )

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result ) == sorted(expected )
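# --- Added smaller, hand-checkable case (illustrative): the MST of a weighted
# triangle keeps its two lightest edges.
def test_kruskal_triangle() -> None:
    edges = [[0, 1, 1], [1, 2, 2], [0, 2, 3]]
    assert sorted(kruskal(3, edges)) == [[0, 1, 1], [1, 2, 2]]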
| 48 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192 )

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants' )

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )

    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'

    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name

    return name
def convert_state_dict(orig_state_dict, model):
    # Note: the split q/k/v destination key names below follow the HF Swin module
    # naming; they were reconstructed, as the original left-hand sides were lost.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict , model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )

    with torch.no_grad():
        outputs = model(**inputs ).logits

    print(outputs.shape )
    print('Looks ok!' )

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and image processor for {model_name} to hub""" )
        model.push_to_hub(f"""microsoft/{model_name}""" )
        image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 48 | 1 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 268 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase_ :
def __init__( self : str ) -> Dict:
UpperCAmelCase_ : List[Any] = ""
UpperCAmelCase_ : int = ""
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[Any] = 256
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : List[str] = 0
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 )
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_ : List[Any] = x[i] / self.k
self.sk += prk
UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase_ : Any = int(last % last )
UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase_ : Dict = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase_ : Any = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase_ : Tuple = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
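
# Note (added for clarity; not in the original file): the loop in `stretch`
# builds a histogram-equalization lookup table -- each intensity r is mapped to
# roughly round((L - 1) * CDF(r)), where CDF is the cumulative normalized
# histogram accumulated in self.sk.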
if __name__ == "__main__":
    # os.path.dirname (rather than basename) resolves the image relative to this file
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 268 | 1 |
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
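
# Note (added for clarity; not in the original script): v1.1-style checkpoints
# use a gated activation, so the feed-forward input projection is stored as two
# kernels (wi_0 and wi_1) instead of the single wi kernel of v1.0 models.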
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
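
# Note (added for clarity; not in the original script): the fallbacks above
# implement weight tying -- the shared token-embedding matrix is reused for the
# encoder/decoder embeddings and, for v1.0 checkpoints, for the LM head too.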
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 368 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 165 | 0 |
'''P-Series: generate the terms 1, 1/2^p, 1/3^p, ..., 1/n^p as strings.'''
from __future__ import annotations
def p_series(nth_term: int | str, power: int | str) -> list[str]:
    """
    Return the first ``nth_term`` terms of the P-Series as strings.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> p_series("", 1000)
    ['']
    >>> p_series(0, 0)
    []
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
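
# Note (added): the corresponding infinite series sum(1 / n**p) converges
# exactly when p > 1 and diverges for p <= 1 (p == 1 is the harmonic series).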
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 208 |
''' CUAD metric. '''

import datasets

from .evaluate import evaluate

_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 321 | 0 |
'''Return all permutations of a list, two ways: by rotation/recursion and by in-place backtracking.'''


def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations.

    >>> permute([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
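
# Note (added): this rotation-based variant runs in O(n * n!) time, which is
# expected since the output itself contains n! permutations.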
def permute2(nums):
    """
    Return all permutations of the given list, via backtracking.

    >>> permute2([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0)
    return output
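
# Note (added): the backtracking variant swaps elements in place, recurses, and
# swaps back, so it needs only O(n) auxiliary space beyond the output list.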
if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
| 369 |
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading"
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_base_pipeline(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 240 | 0 |
''' NLLB-MoE model configuration'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
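
# Minimal usage sketch (added for illustration; not in the original file):
#     config = NllbMoeConfig(num_experts=8, expert_capacity=16)
#     assert config.hidden_size == config.d_model  # resolved via attribute_map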
| 237 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str = "cpu" , lowerCAmelCase__ : Union[str, None] = None ) -> None:
"""simple docstring"""
lowerCAmelCase_ : Any = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(lowerCAmelCase__ , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
lowerCAmelCase_ : str = v.half()
if save_path is None: # overwrite src_path
lowerCAmelCase_ : Dict = src_path
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
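
# Note (added): .half() casts each tensor to torch.float16, roughly halving the
# checkpoint size on disk; the converted file is loaded with torch.load as usual.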
if __name__ == "__main__":
fire.Fire(convert)
| 224 | 0 |
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))

        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
| 353 |
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
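
# Illustrative example (added; not in the original script): with the "init"
# pattern, update_version_in_file("src/transformers/__init__.py", "4.30.0", "init")
# rewrites the `__version__ = "..."` line to `__version__ = "4.30.0"`.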
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 90 | 0 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowerCAmelCase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , *_a , _a=None , _a=None , **_a ):
super().__init__(*_snake_case , **_snake_case )
__a = eval_examples
__a = post_process_function
def __UpperCAmelCase ( self , _a = None , _a=None , _a = None , _a = "eval" , **_a , ):
__a = gen_kwargs.copy()
__a = (
gen_kwargs['max_length'] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
__a = (
gen_kwargs['num_beams'] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
__a = gen_kwargs
__a = self.eval_dataset if eval_dataset is None else eval_dataset
__a = self.get_eval_dataloader(_snake_case )
__a = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
_snake_case , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_snake_case , metric_key_prefix=_snake_case , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_snake_case , _snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__a = self.post_process_function(_snake_case , _snake_case , _snake_case )
__a = self.compute_metrics(_snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__a = metrics.pop(_snake_case )
metrics.update(output.metrics )
else:
__a = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_snake_case )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__a = self.callback_handler.on_evaluate(self.args , self.state , self.control , _snake_case )
return metrics
def __UpperCAmelCase ( self , _a , _a , _a=None , _a = "test" , **_a ):
__a = gen_kwargs.copy()
__a = self.get_test_dataloader(_snake_case )
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
_snake_case , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_snake_case , metric_key_prefix=_snake_case , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_snake_case , _snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__a = self.post_process_function(_snake_case , _snake_case , _snake_case , '''predict''' )
__a = self.compute_metrics(_snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__a = metrics.pop(_snake_case )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_snake_case )
| 45 |
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
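
# Sanity check (added for illustration; not in the original script): the entropy
# of a uniform distribution over n outcomes is log(n), e.g.
#     entropy(torch.full((4,), 0.25))  # -> tensor(1.3863) == log(4)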
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Computes head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Masks heads (sets them to zero) until the score drops below a threshold of the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prunes heads (actually removes the masked weights) and measures the time speedup."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and 0.0 < args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
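# Illustrative invocation of this script (the script name and paths below are
# examples, not taken from the original repository):
#   python run_prunability.py --data_dir ./token_ids.txt --model_name_or_path gpt2 \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9 --masking_amount 0.1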
| 277 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """
    Creates a set of `DataLoader`s for the given fold of the `glue/mrpc` dataset,
    holding out `dataset["validation"]` as the test split.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
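# Minimal sketch of the splits consumed above (data and labels are illustrative):
#   kfold = StratifiedKFold(n_splits=3)
#   for train_idxs, valid_idxs in kfold.split(np.zeros(6), [0, 1, 0, 1, 0, 1]):
#       ...  # each fold preserves the label ratio of the full dataset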
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
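# Note on the fold ensembling above: the per-fold test logits are stacked into a
# (num_folds, num_test, num_labels) tensor, summed over folds, divided by
# num_folds, and argmax'd — i.e. soft-voting over the fold models.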
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        " and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 350 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__a = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
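    # Sketch of the sys.argv patching used above (flag values are illustrative):
    #   with patch.object(sys, "argv", ["run_glue_deebert.py", "--do_eval"]):
    #       run_glue_deebert.main()  # the script's argparse now sees these args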
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args)
| 235 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
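    # Note for the tests below: swapping schedulers via
    # `SchedulerClass.from_config(pipe.scheduler.config)` is the standard
    # diffusers pattern for rebuilding a compatible scheduler from the
    # pipeline's current scheduler configuration.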
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 109 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
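# Usage sketch (the argument and version strings are illustrative):
#   value = deprecate("old_kwarg", "99.0.0", "Use `new_kwarg` instead.", take_from=kwargs)
# pops `old_kwarg` from `kwargs`, emits a FutureWarning, and returns its value.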
| 109 | 1 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for
    `direction` (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
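# Note: bitonic sort only works when len(array) is a power of two.
# Example: a = [3, 1, 2, 0]; bitonic_sort(a, 0, 4, 1)  # a becomes [0, 1, 2, 3]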
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 308 |
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
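# Worked example: for [[1, 2], [3, 4]] the answer is 3 (enter at row 0 and
# move right along the top row: 1 + 2).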
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 167 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
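        # The merges file above uses the GPT-2/CLIP BPE format: a "#version"
        # header, then one merge rule per line, e.g. "l o" fuses tokens "l"
        # and "o" into "lo".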
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 167 | 1 |
'''simple docstring'''
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
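    # Example: max_product_subarray([2, 3, -2, 4]) == 6 (subarray [2, 3]),
    # and max_product_subarray([-2, 0, -1]) == 0.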
    return max_prod
| 17 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with the base85 (b85) scheme."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode base85 (b85) bytes back to a UTF-8 string."""
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 17 | 1 |
import os
def UpperCAmelCase_ ( __snake_case = "input.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) as input_file:
_lowercase =[
[int(__snake_case ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
_lowercase =len(__snake_case )
_lowercase =len(matrix[0] )
_lowercase =[[-1 for _ in range(__snake_case )] for _ in range(__snake_case )]
for i in range(__snake_case ):
_lowercase =matrix[i][0]
for j in range(1 , __snake_case ):
for i in range(__snake_case ):
_lowercase =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __snake_case ):
_lowercase =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_lowercase =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
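# Sketch of the fused-qkv split above (shapes are illustrative): a checkpoint
# tensor of shape (3 * dim, hidden) is cut into query/key/value projections:
#   val = torch.randn(3 * 8, 8); dim = 8
#   q, k, v = val[:dim, :], val[dim : dim * 2, :], val[-dim:, :]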
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
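# Illustrative invocation (the script name and checkpoint path are examples):
#   python convert_mlcvnets_to_pytorch.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small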
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 5 | 1 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """Builds a LinkedList; see the usage sketch below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
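# Minimal usage sketch (illustrative):
#   ll = LinkedList()
#   for v in (1, 2, 3):
#       ll.insert(v)
#   assert str(ll) == "1 2 3" and 2 in ll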
| 138 |
__magic_name__: List[Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__magic_name__: Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__magic_name__: Optional[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 138 | 1 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
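# Usage sketch (BertConfig shown only for illustration): a model test file
# typically instantiates this helper as
#
#     self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#     self.config_tester.run_common_tests()
#
# which exercises every check defined in the class above.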
| 84 |
"""
Project Euler Problem 92: https://projecteuler.net/problem=92

Square-digit chains: every starting number eventually arrives at 1 or 89.
Count how many starting numbers below ten million arrive at 89.
"""
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the next number in the chain of squared digit sums."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
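# Worked example of the two chains: 44 -> 4^2 + 4^2 = 32 -> 13 -> 10 -> 1 (ends at 1),
# while 85 -> 8^2 + 5^2 = 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 (loops at 89).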
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends with 1
CHAINS[57] = False  # the chain starting at 58 ends with 89
def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` ends with 1 (False for 89)."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Every number reached by appending zeros shares the same chain result.
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 80 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 360 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Return ``x`` unchanged if it is already iterable, else duplicate it into a 2-tuple."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
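# Examples: to_atuple(3) -> (3, 3); to_atuple((224, 224)) -> (224, 224).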
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
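        # Patch-count example (sketch): a 224x224 image with 16x16 patches gives
        # (224 // 16) ** 2 = 196 patches, so seq_len = 196 + 1 = 197 including [CLS].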
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 337 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
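# Size arithmetic for the defaults above: 10 << 20 == 10 * 2**20 == 10_485_760 bytes
# (10 MiB) per read chunk, and the reader below floors its PyArrow block size at
# 16 << 10 == 16_384 bytes (16 KiB).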
    def _info(self):
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
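# Minimal usage sketch: `load_dataset` routes "json" files through this builder, e.g.
#
#     from datasets import load_dataset
#     ds = load_dataset("json", data_files="data.jsonl", split="train")
#
# ("data.jsonl" is a hypothetical local file with one JSON object per line.)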
| 322 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'''Found the following incompatible ops for the opset {opset}:''')
        print(*incompatible_ops, sep='\n')
    else:
        print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 322 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('fixtures')


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 36 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Compute the speed of sound (m/s) in a fluid from its density and bulk modulus."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
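# Worked example: for water at roughly 998 kg/m^3 with a bulk modulus of about
# 2.15e9 Pa (approximate textbook values), (2.15e9 / 998) ** 0.5 is roughly 1468 m/s.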
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 1 |
"""Bilateral filtering of a grayscale image using OpenCV and NumPy."""
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to each element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    # Window of size kernel_size centered on (x, y).
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Distances from the kernel center, passed through the gaussian.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size += abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
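# Kernel-size note (editorial sketch): `kernel_size += abs(kernel_size % 2 - 1)` in
# parse_args forces an odd size, e.g. 4 -> 4 + abs(0 - 1) = 5, while 5 -> 5 + abs(1 - 1) = 5.
# Odd sizes keep the window centered on the pixel being filtered.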
| 47 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
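    # Offset example (sketch, from the alignment table above): spm assigns "," id 3,
    # so its fairseq/HF id is 3 + fairseq_offset = 4, while ids 0-3 stay reserved
    # for <s>, <pad>, </s>, <unk>.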
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
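    # Mask example (sketch): a single 3-token sequence yields [1, 0, 0, 0, 1] --
    # 1 marks the added <s>/</s> specials, 0 marks the original tokens.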
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 165 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
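# Example (sketch): with 2 processes, process 0 holds tensor([1., 2.]) and process 1
# holds tensor([3., 4.]), so gathering yields [1., 2., 3., 4.] == list(range(1, 2**2 + 1)).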
def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure padding was done correctly
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """sum""")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """mean""")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
state.print("""testing gather""" )
test_gather(__a )
state.print("""testing gather_object""" )
test_gather_object(__a )
state.print("""testing broadcast""" )
test_broadcast(__a )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(__a )
state.print("""testing reduce_sum""" )
test_reduce_sum(__a )
state.print("""testing reduce_mean""" )
test_reduce_mean(__a )
if __name__ == "__main__":
main()
| 276 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`"""
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`"""
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
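# Feature-count example (sketch, using the defaults above): cardinality=[0] gives
# embedding_dimension=[min(50, (0 + 1) // 2)] = [0], so _number_of_features =
# 0 + 0 + 0 + 0 + 1 * 2 = 2 and feature_size = 1 * len(lags_sequence) + 2 = 9.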
| 276 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=2 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=36 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=6 , UpperCamelCase__=6 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , UpperCamelCase__=1000 , ) -> str:
lowerCamelCase : int = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : Dict = image_size
lowerCamelCase : Union[str, Any] = patch_size
lowerCamelCase : Dict = is_training
lowerCamelCase : List[str] = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : Dict = use_labels
lowerCamelCase : Union[str, Any] = vocab_size
lowerCamelCase : List[Any] = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : int = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = max_position_embeddings
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : str = type_sequence_label_size
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = coordinate_size
lowerCamelCase : Tuple = shape_size
lowerCamelCase : List[Any] = num_labels
lowerCamelCase : Tuple = num_choices
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase : Dict = text_seq_length
lowerCamelCase : List[Any] = (image_size // patch_size) ** 2 + 1
lowerCamelCase : Union[str, Any] = self.text_seq_length + self.image_seq_length
def _lowercase ( self ) -> int:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
lowerCamelCase : Optional[int] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase : Dict = bbox[i, j, 3]
lowerCamelCase : Optional[int] = bbox[i, j, 1]
lowerCamelCase : Any = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase : List[str] = bbox[i, j, 2]
lowerCamelCase : Optional[int] = bbox[i, j, 0]
lowerCamelCase : Tuple = tmp_coordinate
lowerCamelCase : Union[str, Any] = tf.constant(UpperCamelCase__ )
lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Optional[int] = None
if self.use_input_mask:
lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase : str = None
lowerCamelCase : Optional[int] = None
if self.use_labels:
lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase : Any = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
lowerCamelCase : Optional[int] = TFLayoutLMvaModel(config=UpperCamelCase__ )
# text + image
lowerCamelCase : Any = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ )
lowerCamelCase : Optional[int] = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , training=UpperCamelCase__ , )
lowerCamelCase : Any = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase : Dict = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase : Union[str, Any] = model({"pixel_values": pixel_values} , training=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
lowerCamelCase : str = self.num_labels
lowerCamelCase : Optional[Any] = TFLayoutLMvaForSequenceClassification(config=UpperCamelCase__ )
lowerCamelCase : str = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : int = self.num_labels
lowerCamelCase : str = TFLayoutLMvaForTokenClassification(config=UpperCamelCase__ )
lowerCamelCase : Tuple = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = 2
lowerCamelCase : Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCamelCase__ )
lowerCamelCase : Tuple = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , training=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self ) -> str:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : List[str] = config_and_inputs
lowerCamelCase : Dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase_ : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase_ : int = False
lowerCamelCase_ : int = False
lowerCamelCase_ : str = False
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
return True
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> dict:
lowerCamelCase : Optional[Any] = copy.deepcopy(UpperCamelCase__ )
if model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : List[str] = {
k: tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Dict = TFLayoutLMvaModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = model_class(UpperCamelCase__ )
if getattr(UpperCamelCase__ , "hf_compute_loss" , UpperCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCamelCase__ )[0]
]
lowerCamelCase : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Dict = prepared_for_class.pop("input_ids" )
lowerCamelCase : Optional[int] = model(UpperCamelCase__ , **UpperCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Dict = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
lowerCamelCase : List[str] = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowerCamelCase : Optional[int] = -100
lowerCamelCase : Any = tf.convert_to_tensor(UpperCamelCase__ )
lowerCamelCase : Dict = model(UpperCamelCase__ , **UpperCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Optional[int] = model(UpperCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowerCamelCase : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
# Get keys that were added with the _prepare_for_class function
lowerCamelCase : Any = prepared_for_class.keys() - inputs_dict.keys()
lowerCamelCase : Any = inspect.signature(model.call ).parameters
lowerCamelCase : List[str] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowerCamelCase : Any = {0: "input_ids"}
for label_key in label_keys:
lowerCamelCase : Optional[Any] = signature_names.index(UpperCamelCase__ )
lowerCamelCase : int = label_key
lowerCamelCase : str = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowerCamelCase : List[str] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowerCamelCase : Optional[Any] = prepared_for_class[value]
lowerCamelCase : Any = tuple(UpperCamelCase__ )
# Send to model
lowerCamelCase : List[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def _lowercase ( self ) -> Optional[Any]:
        (lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self ) -> List[str]:
        (lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : List[str] = type
self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self ) -> Optional[int]:
        (lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self ) -> Any:
        (lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
        (lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@slow
def _lowercase ( self ) -> Optional[int]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ) -> int:
lowerCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> Union[str, Any]:
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None
@slow
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Optional[Any] = prepare_img()
lowerCamelCase : Dict = image_processor(images=UpperCamelCase__ , return_tensors="tf" ).pixel_values
lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] )
lowerCamelCase : Dict = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
lowerCamelCase : List[Any] = model(input_ids=UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ )
# verify the logits
lowerCamelCase : Dict = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
lowerCamelCase : Dict = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
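# Hedged sketch (illustrative values, not from the test above) of the bbox
# legalization performed in the tester: coordinates are swapped so that
# (x0, y0) is always the top-left and (x1, y1) the bottom-right corner.
x0, y0, x1, y1 = 5, 9, 2, 3  # an illegal box: x0 > x1 and y0 > y1
if y1 < y0:
    y0, y1 = y1, y0
if x1 < x0:
    x0, x1 = x1, x0
assert (x0, y0, x1, y1) == (2, 3, 5, 9)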
| 48 |
'''simple docstring'''
import string
from math import logaa
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
snake_case__ : List[str] = document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
snake_case__ : List[str] = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, int]:
snake_case__ : Dict = corpus.lower().translate(
str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
snake_case__ : Any = corpus_without_punctuation.split("""\n""" )
snake_case__ : int = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_lowerCAmelCase ))
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> float:
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) , 3 )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float:
return round(tf * idf , 3 )
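# Hedged, readable sketch of how the helpers above fit together (term frequency,
# document frequency, idf); the names below are illustrative, not the snippet's.
import string
from math import log10

def term_frequency(term: str, document: str) -> int:
    # same tokenization as above: strip punctuation, drop newlines, split on spaces
    words = document.translate(str.maketrans("", "", string.punctuation)).replace("\n", "").split(" ")
    return len([w for w in words if w.lower() == term.lower()])

tf = term_frequency("be", "To be, or not to be")  # -> 2 occurrences
df, n = 1, 2                                      # "be" appears in 1 of 2 documents
idf = round(log10(n / df), 3)                     # -> 0.301
print(round(tf * idf, 3))                         # tf-idf = 0.602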
| 35 | 0 |
import math
def lowerCAmelCase_ (lowerCAmelCase__: int ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase_: Any = range(3 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def lowerCAmelCase_ (lowerCAmelCase__: str , lowerCAmelCase__: Any=1 , **lowerCAmelCase__: Tuple ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = factor * value
UpperCAmelCase_: List[str] = value
while not is_prime(lowerCAmelCase__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCAmelCase__ )
return value
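# Hedged sketch of the intent above (names and the simplified wrap-around
# behaviour are assumptions): trial-division primality plus a walker that moves
# up (or down with desc=True) from factor * value to the nearest prime.
import math

def is_prime(n: int) -> bool:
    if 1 < n < 4:
        return True  # 2 and 3 are prime
    if n < 2 or n % 2 == 0:
        return False  # negatives, 0, 1 and even numbers are not
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))

def next_prime(value: int, factor: int = 1, desc: bool = False) -> int:
    candidate = factor * value
    while not is_prime(candidate):
        candidate += -1 if desc else 1
    return candidate

assert next_prime(10) == 11 and next_prime(10, desc=True) == 7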
| 82 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a : Optional[int] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
a : Any = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def lowerCAmelCase_ (lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: int=False ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_: int = create_model(
"""HTSAT-tiny""" , """roberta""" , lowerCAmelCase__ , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=lowerCAmelCase__ , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def lowerCAmelCase_ (lowerCAmelCase__: List[Any] ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = {}
UpperCAmelCase_: Optional[Any] = r""".*sequential.(\d+).*"""
UpperCAmelCase_: str = r""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase_: Optional[int] = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
# replace sequential layers with list
UpperCAmelCase_: int = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 )
UpperCAmelCase_: Dict = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(lowerCAmelCase__ )//3}.linear.' )
elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_: int = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
            UpperCAmelCase_: Optional[Any] = 1 if projection_layer == 0 else 2
            UpperCAmelCase_: Tuple = key.replace(F'_projection.{projection_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase_: str = value
UpperCAmelCase_: Optional[int] = mixed_qkv.size(0 ) // 3
UpperCAmelCase_: Optional[int] = mixed_qkv[:qkv_dim]
UpperCAmelCase_: List[Any] = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase_: int = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase_: str = query_layer
UpperCAmelCase_: List[Any] = key_layer
UpperCAmelCase_: str = value_layer
else:
UpperCAmelCase_: Tuple = value
return model_state_dict
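# Hedged sketch (standalone, illustrative shapes) of the qkv split performed in
# the branch above: a fused (3*dim, in) weight is cut along dim 0 into equal
# query / key / value blocks.
import torch

mixed_qkv = torch.randn(3 * 4, 4)
qkv_dim = mixed_qkv.size(0) // 3
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (4, 4)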
def lowerCAmelCase_ (lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Union[str, Any] , lowerCAmelCase__: List[Any]=False ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ )
clap_model.eval()
UpperCAmelCase_: Optional[Any] = clap_model.state_dict()
UpperCAmelCase_: Optional[Any] = rename_state_dict(lowerCAmelCase__ )
UpperCAmelCase_: Dict = ClapConfig()
UpperCAmelCase_: Tuple = enable_fusion
UpperCAmelCase_: int = ClapModel(lowerCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
transformers_config.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
a : Optional[Any] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 82 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A: int = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : str = ['input_features']
def __init__( self , _SCREAMING_SNAKE_CASE=80 , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE=160 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , padding_value=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = n_fft
UpperCAmelCase : str = hop_length
UpperCAmelCase : Tuple = chunk_length
UpperCAmelCase : Optional[Any] = chunk_length * sampling_rate
UpperCAmelCase : str = self.n_samples // hop_length
UpperCAmelCase : Any = sampling_rate
UpperCAmelCase : List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = spectrogram(
_SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
UpperCAmelCase : List[str] = log_spec[:, :-1]
UpperCAmelCase : Any = np.maximum(_SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCAmelCase : List[Any] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
UpperCAmelCase : Optional[Any] = np.array(_SCREAMING_SNAKE_CASE , np.intaa )
UpperCAmelCase : List[str] = []
for vector, length in zip(_SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCAmelCase : str = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
UpperCAmelCase : Optional[Any] = padding_value
normed_input_values.append(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "max_length" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase : Tuple = isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
UpperCAmelCase : Optional[Any] = is_batched_numpy or (
isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase : Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCAmelCase : List[str] = np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase : Optional[Any] = [np.asarray([raw_speech] ).T]
UpperCAmelCase : Union[str, Any] = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
UpperCAmelCase : Optional[int] = self.pad(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCAmelCase : Any = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
UpperCAmelCase : Tuple = np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
UpperCAmelCase : Optional[int] = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
UpperCAmelCase : int = [self._np_extract_fbank_features(_SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Any = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
UpperCAmelCase : Dict = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCAmelCase : Tuple = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
UpperCAmelCase : str = padded_inputs.convert_to_tensors(_SCREAMING_SNAKE_CASE )
return padded_inputs
def SCREAMING_SNAKE_CASE ( self ) -> Dict[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
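# Hedged sketch (illustrative array, not the extractor's real input) of the
# log-spectrogram post-processing above: clip to within 8 dB of the maximum,
# then rescale into a roughly [-1, 1] range.
import numpy as np

log_spec = np.random.uniform(-20.0, 0.0, size=(80, 3000))
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0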
| 109 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Any , A : Union[str, Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
__snake_case: Dict = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
__snake_case: Dict = TensorFlowBenchmark(A )
__snake_case: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = """sgugger/tiny-distilbert-classification"""
__snake_case: str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
__snake_case: Any = TensorFlowBenchmark(A )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : int ):
__snake_case: Dict = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: int = TensorFlowBenchmark(A )
__snake_case: str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = """sshleifer/tiny-gpt2"""
__snake_case: int = AutoConfig.from_pretrained(A )
__snake_case: Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
__snake_case: Union[str, Any] = TensorFlowBenchmark(A , [config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[int] = """sshleifer/tiny-gpt2"""
__snake_case: Tuple = AutoConfig.from_pretrained(A )
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: str = TensorFlowBenchmark(A , [config] )
__snake_case: str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Tuple = """sshleifer/tiny-gpt2"""
__snake_case: str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Tuple = TensorFlowBenchmark(A )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : int ):
__snake_case: Dict = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = AutoConfig.from_pretrained(A )
__snake_case: Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: List[str] = TensorFlowBenchmark(A , [config] )
__snake_case: Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = """patrickvonplaten/t5-tiny-random"""
__snake_case: List[str] = AutoConfig.from_pretrained(A )
__snake_case: List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Optional[int] = TensorFlowBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = """sshleifer/tiny-gpt2"""
__snake_case: List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=A , multi_process=A , )
__snake_case: Union[str, Any] = TensorFlowBenchmark(A )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(A , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(A , """env.csv""" ) , multi_process=A , )
__snake_case: Tuple = TensorFlowBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """env.csv""" ) ).exists() )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Any = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A : Union[str, Any] ):
self.assertTrue(hasattr(A , """sequential""" ) )
self.assertTrue(hasattr(A , """cumulative""" ) )
self.assertTrue(hasattr(A , """current""" ) )
self.assertTrue(hasattr(A , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , """log.txt""" ) , log_print=A , trace_memory_line_by_line=A , eager_mode=A , multi_process=A , )
__snake_case: Dict = TensorFlowBenchmark(A )
__snake_case: List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(A , """log.txt""" ) ).exists() )
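# Hedged sketch of the assertion helper these tests rely on (name and sample
# payload are assumptions): every (batch_size, sequence_length) cell of a
# benchmark result table must have been populated.
def check_results_not_empty(results: dict) -> None:
    for model_result in results.values():
        for bs, ss in zip(model_result["bs"], model_result["ss"]):
            assert model_result["result"][bs][ss] is not None

check_results_not_empty({"tiny-gpt2": {"bs": [1], "ss": [8], "result": {1: {8: 0.02}}}})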
| 111 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def _lowerCAmelCase ( A__: Dict , A__: Tuple=False ):
'''simple docstring'''
UpperCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _lowerCAmelCase ( A__: List[Any] , A__: Any , A__: List[Any]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase = ''''''
else:
UpperCAmelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( A__: int ):
'''simple docstring'''
UpperCAmelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _lowerCAmelCase ( A__: Any , A__: Optional[Any] , A__: int ):
'''simple docstring'''
UpperCAmelCase = dct.pop(A__ )
UpperCAmelCase = val
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( A__: Union[str, Any] , A__: Optional[int] ):
'''simple docstring'''
UpperCAmelCase = ViTConfig()
UpperCAmelCase = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
UpperCAmelCase = True
UpperCAmelCase = int(vit_name[-12:-10] )
UpperCAmelCase = int(vit_name[-9:-6] )
else:
UpperCAmelCase = 1000
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''imagenet-1k-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(A__ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = int(vit_name[-6:-4] )
UpperCAmelCase = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
UpperCAmelCase = 192
UpperCAmelCase = 768
UpperCAmelCase = 12
UpperCAmelCase = 3
elif vit_name[9:].startswith('''small''' ):
UpperCAmelCase = 384
UpperCAmelCase = 1536
UpperCAmelCase = 12
UpperCAmelCase = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
UpperCAmelCase = 768
UpperCAmelCase = 2304
UpperCAmelCase = 8
UpperCAmelCase = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
UpperCAmelCase = 1024
UpperCAmelCase = 4096
UpperCAmelCase = 24
UpperCAmelCase = 16
elif vit_name[4:].startswith('''huge''' ):
UpperCAmelCase = 1280
UpperCAmelCase = 5120
UpperCAmelCase = 32
UpperCAmelCase = 16
# load original model from timm
UpperCAmelCase = timm.create_model(A__ , pretrained=A__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(A__ )
UpperCAmelCase = create_rename_keys(A__ , A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , A__ , A__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCAmelCase = ViTModel(A__ ).eval()
else:
UpperCAmelCase = ViTForImageClassification(A__ ).eval()
model.load_state_dict(A__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
UpperCAmelCase = DeiTImageProcessor(size=config.image_size )
else:
UpperCAmelCase = ViTImageProcessor(size=config.image_size )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = encoding['''pixel_values''']
UpperCAmelCase = model(A__ )
if base_model:
UpperCAmelCase = timm_model.forward_features(A__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(A__ , outputs.pooler_output , atol=1E-3 )
else:
UpperCAmelCase = timm_model(A__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A__ , outputs.logits , atol=1E-3 )
Path(A__ ).mkdir(exist_ok=A__ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__magic_name__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
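# Hedged sketch (toy state dict, not real ViT weights) of the rename mechanism
# used by rename_key above: pop the timm key and re-insert its value under the
# HF name, leaving all other entries untouched.
state = {"norm.weight": 1, "head.bias": 2, "cls_token": 3}
for src, dest in [("norm.weight", "vit.layernorm.weight"), ("head.bias", "classifier.bias")]:
    state[dest] = state.pop(src)
assert "norm.weight" not in state and state["vit.layernorm.weight"] == 1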
| 152 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 152 | 1 |
import argparse
import copy
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = {}
with open(__snake_case ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__UpperCamelCase = []
_list.append([line.split()[1], line.split()[2]] )
__UpperCamelCase = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__UpperCamelCase = []
_list.append([line.split()[0], line.split()[2]] )
__UpperCamelCase = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _A ( _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(__snake_case ) as f:
__UpperCamelCase = f.read(1 )
__UpperCamelCase = start_node
__UpperCamelCase = []
__UpperCamelCase = start_node
__UpperCamelCase = 0
while visiting not in first_solution:
__UpperCamelCase = 1_00_00
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__snake_case ) and k[0] not in first_solution:
__UpperCamelCase = k[1]
__UpperCamelCase = k[0]
first_solution.append(__snake_case )
__UpperCamelCase = distance_of_first_solution + int(__snake_case )
__UpperCamelCase = best_node
first_solution.append(__snake_case )
__UpperCamelCase = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__UpperCamelCase = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_00_00
)
return first_solution, distance_of_first_solution
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
for n in solution[1:-1]:
__UpperCamelCase = solution.index(__snake_case )
for kn in solution[1:-1]:
__UpperCamelCase = solution.index(__snake_case )
if n == kn:
continue
__UpperCamelCase = copy.deepcopy(__snake_case )
__UpperCamelCase = kn
__UpperCamelCase = n
__UpperCamelCase = 0
for k in _tmp[:-1]:
__UpperCamelCase = _tmp[_tmp.index(__snake_case ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__UpperCamelCase = distance + int(i[1] )
_tmp.append(__snake_case )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__UpperCamelCase = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda _lowercase : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = 1
__UpperCamelCase = first_solution
__UpperCamelCase = []
__UpperCamelCase = distance_of_first_solution
__UpperCamelCase = solution
while count <= iters:
__UpperCamelCase = find_neighborhood(__snake_case , __snake_case )
__UpperCamelCase = 0
__UpperCamelCase = neighborhood[index_of_best_solution]
__UpperCamelCase = len(__snake_case ) - 1
__UpperCamelCase = False
while not found:
__UpperCamelCase = 0
while i < len(__snake_case ):
if best_solution[i] != solution[i]:
__UpperCamelCase = best_solution[i]
__UpperCamelCase = solution[i]
break
__UpperCamelCase = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__UpperCamelCase = True
__UpperCamelCase = best_solution[:-1]
__UpperCamelCase = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__UpperCamelCase = cost
__UpperCamelCase = solution
else:
__UpperCamelCase = index_of_best_solution + 1
__UpperCamelCase = neighborhood[index_of_best_solution]
if len(__snake_case ) >= size:
tabu_list.pop(0 )
__UpperCamelCase = count + 1
return best_solution_ever, best_cost
def _A ( _lowercase=None ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = generate_neighbours(args.File )
__UpperCamelCase = generate_first_solution(
args.File , __snake_case )
__UpperCamelCase = tabu_search(
__snake_case , __snake_case , __snake_case , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
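# Hedged sketch of the input format the neighbour reader above appears to
# expect (assumed from its line.split() indexing): one weighted undirected edge
# per line, "node_a node_b distance".
sample_edges = """a b 20
a c 18
b c 10
c d 12"""
# parsing one line mirrors line.split()[0..2] in the reader:
node_a, node_b, dist = sample_edges.splitlines()[0].split()
assert (node_a, node_b, dist) == ("a", "b", "20")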
| 310 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __snake_case : str, __snake_case : dict ) -> str:
"""simple docstring"""
A__ : Optional[Any] =BeautifulSoup(requests.get(__snake_case, params=__snake_case ).content, """html.parser""" )
A__ : List[str] =soup.find("""div""", attrs={"""class""": """gs_ri"""} )
A__ : Tuple =div.find("""div""", attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
__snake_case : Optional[Any] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 134 | 0 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
_UpperCamelCase: Any = 'naver-clova-ix/donut-base'
class a__ ( unittest.TestCase ):
def lowercase ( self : Optional[Any] ) -> Tuple:
lowercase : Any = DonutProcessor.from_pretrained(lowerCAmelCase )
def lowercase ( self : Dict ) -> Union[str, Any]:
lowercase : Tuple = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
lowercase : Tuple = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
lowercase : Any = self.processor.tokenajson(lowerCAmelCase )
self.assertDictEqual(lowerCAmelCase, lowerCAmelCase )
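# Hedged sketch of the tag convention exercised by the test above (the helper
# name is an assumption): each field is wrapped in <s_key>...</s_key> and
# sibling list items are separated by <sep/>.
def wrap(key: str, value: str) -> str:
    return f"<s_{key}>{value}</s_{key}>"

assert wrap("age", "99") == "<s_age>99</s_age>"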
| 53 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCamelCase: Tuple = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class a__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : int, **lowerCAmelCase : str ) -> Any:
super().__init__(**lowerCAmelCase )
requires_backends(self, 'vision' )
requires_backends(self, 'torch' )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(lowerCAmelCase )
def lowercase ( self : Optional[int], **lowerCAmelCase : int ) -> Tuple:
lowercase : List[Any] = {}
lowercase : List[str] = {}
lowercase : Optional[int] = {}
# preprocess args
if "points_per_batch" in kwargs:
lowercase : List[Any] = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
lowercase : Tuple = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
lowercase : Any = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
lowercase : Dict = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
lowercase : str = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
lowercase : List[str] = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
lowercase : List[str] = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
lowercase : str = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
lowercase : Optional[int] = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
lowercase : Dict = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
lowercase : int = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
lowercase : Union[str, Any] = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : List[Any], lowerCAmelCase : List[str], *lowerCAmelCase : Optional[Any], lowerCAmelCase : Dict=None, lowerCAmelCase : Union[str, Any]=None, **lowerCAmelCase : int ) -> List[str]:
return super().__call__(lowerCAmelCase, *lowerCAmelCase, num_workers=lowerCAmelCase, batch_size=lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : List[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Tuple=64, lowerCAmelCase : int = 0, lowerCAmelCase : float = 512 / 1500, lowerCAmelCase : Optional[int] = 32, lowerCAmelCase : Optional[int] = 1, ) -> Union[str, Any]:
lowercase : List[Any] = load_image(lowerCAmelCase )
lowercase : str = self.image_processor.size['longest_edge']
lowercase , lowercase , lowercase , lowercase : List[Any] = self.image_processor.generate_crop_boxes(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
lowercase : Any = self.image_processor(images=lowerCAmelCase, return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
lowercase : Optional[int] = self.get_inference_context()
with inference_context():
lowercase : List[str] = self._ensure_tensor_on_device(lowerCAmelCase, device=self.device )
lowercase : int = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
lowercase : List[Any] = image_embeddings
lowercase : Dict = grid_points.shape[1]
lowercase : Any = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0, lowerCAmelCase, lowerCAmelCase ):
lowercase : Optional[int] = grid_points[:, i : i + points_per_batch, :, :]
lowercase : List[str] = input_labels[:, i : i + points_per_batch]
lowercase : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowercase ( self : Any, lowerCAmelCase : List[str], lowerCAmelCase : str=0.88, lowerCAmelCase : Optional[int]=0.95, lowerCAmelCase : str=0, lowerCAmelCase : Optional[int]=1, ) -> Optional[int]:
lowercase : Optional[int] = model_inputs.pop('input_boxes' )
lowercase : Any = model_inputs.pop('is_last' )
lowercase : Tuple = model_inputs.pop('original_sizes' ).tolist()
lowercase : Union[str, Any] = model_inputs.pop('reshaped_input_sizes' ).tolist()
lowercase : str = self.model(**lowerCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowercase : str = model_outputs['pred_masks']
lowercase : str = self.image_processor.post_process_masks(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, binarize=lowerCAmelCase )
lowercase : Dict = model_outputs['iou_scores']
lowercase , lowercase , lowercase : int = self.image_processor.filter_masks(
masks[0], iou_scores[0], original_sizes[0], input_boxes[0], lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowercase ( self : Optional[Any], lowerCAmelCase : str, lowerCAmelCase : Tuple=False, lowerCAmelCase : Any=False, lowerCAmelCase : Tuple=0.7, ) -> List[str]:
lowercase : Any = []
lowercase : Optional[Any] = []
lowercase : Optional[Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
lowercase : Optional[Any] = torch.cat(lowerCAmelCase )
lowercase : List[Any] = torch.cat(lowerCAmelCase )
lowercase , lowercase , lowercase , lowercase : str = self.image_processor.post_process_for_mask_generation(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
lowercase : str = defaultdict(lowerCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCAmelCase )
lowercase : Dict = {}
if output_rle_mask:
lowercase : Tuple = rle_mask
if output_bboxes_mask:
lowercase : Tuple = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 53 | 1 |