| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81-54k chars) | int64 (0-721) | string (91-41.9k chars) | int64 (0-699) | int64 (0-1) |
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the shard index ranges per job; jobs at the front get one extra shard when the split is uneven."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs gen_kwargs, sharding every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the list values of all the gen_kwargs."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the list values of gen_kwargs, reusing the same permutation for lists of the same length."""
    lists_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in lists_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
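
# A minimal sketch of how the helpers above compose (the gen_kwargs values are
# made-up example data, not part of this module):
if __name__ == "__main__":
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt"], "split": "train"}
    jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=3)
    # -> three dicts holding 2, 1 and 1 files respectively; "split" is copied as-is
    assert _merge_gen_kwargs(jobs) == gen_kwargs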
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
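
# Usage sketch (illustrative overrides only; in practice you would import
# Data2VecVisionConfig from the public transformers API):
#   config = Data2VecVisionConfig(image_size=384, drop_path_rate=0.2)
#   config.model_type  # "data2vec-vision"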
import torch
import torch.nn as nn

from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff,
            dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder, is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
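
# Shape sketch (the hyperparameters below are illustrative, not from a released checkpoint):
#   enc = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768,
#                                 dropout_rate=0.1, num_layers=2, num_heads=12,
#                                 d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu")
#   tokens = torch.zeros(1, 16, dtype=torch.long); mask = torch.ones(1, 16, dtype=torch.long)
#   out, out_mask = enc(tokens, mask)   # out: (1, 16, 768)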
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
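
# Example invocation (the script file name and all paths are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/xlnet_model.ckpt \
#       --xlnet_config_file /path/to/xlnet_config.json \
#       --pytorch_dump_folder_path /path/to/output \
#       --finetuning_task sst-2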
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a negative integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
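    # A few hand-checked examples of the function above:
    assert twos_complement(-1) == "0b11"
    assert twos_complement(-3) == "0b101"
    assert twos_complement(-5) == "0b1011"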
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
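
# Note: the pattern above defers the heavy torch/flax imports until first attribute
# access. _LazyModule replaces this module in sys.modules, so e.g.
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel
# only imports modeling_speech_encoder_decoder at that moment.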
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
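
# Usage sketch (assumes the taming-transformers package is installed and the
# default checkpoint paths above exist on disk):
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)
    images = torch.randn(1, 3, 256, 256, device=device)  # stand-in input batch
    recon = reconstruct_with_vqgan(images, vqgan)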
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # CANINE has no vocabulary file to save.
        return ()
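
# Usage sketch via the public API (CANINE tokenizes at the raw character level,
# so no vocabulary file is needed; the checkpoint name comes from the map above):
#   from transformers import CanineTokenizer
#   tok = CanineTokenizer.from_pretrained("nielsr/canine-s")
#   tok("hello")["input_ids"]   # [CLS] + one Unicode codepoint id per character + [SEP]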
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
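
# Usage sketch (illustrative only; in practice import RoCBertConfig from transformers):
#   config = RoCBertConfig(enable_shape=False)
#   config.model_type  # "roc_bert"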
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
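
# Usage sketch via the public API (requires the `sentencepiece` package):
#   from transformers import XLMRobertaTokenizer
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok("Hello world")["input_ids"]   # <s> ... </s> using the fairseq-aligned ids above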
from __future__ import annotations


def mean(nums: list) -> float:
    """Find the mean of a list of numbers; raise ValueError on an empty list."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
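    # A quick sanity check of the helper above:
    assert mean([3, 6, 9]) == 6.0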
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )

_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"

_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"

_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> cer = datasets.load_metric(\"cer\")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto the top eigenvectors of its covariance matrix."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto directions that best separate the classes."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
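    # Quick PCA demo on the toy data from the tests above:
    demo_features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
    print(principal_component_analysis(demo_features, dimensions=2))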
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
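
# Shape sketch (dimensions are illustrative, not tied to a released checkpoint):
#   proj = UnCLIPTextProjModel(clip_extra_context_tokens=4, clip_embeddings_dim=768,
#                              time_embed_dim=1536, cross_attention_dim=768)
#   With image_embeddings (B, 768), prompt_embeds (B, 768) and
#   text_encoder_hidden_states (B, 77, 768), the forward pass returns hidden
#   states of shape (B, 77 + 4, 768) plus a (B, 1536) additive time embedding.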
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown by Google Scholar for the given lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
"""Processor class for BridgeTower."""

from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
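
# Usage sketch (illustrative; the checkpoint name is an assumption):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
#   `inputs` carries the tokenizer fields plus pixel_values / pixel_mask from the image processor.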
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Treat the curve as a collection of linear segments and sum the areas of the trapezia they form."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    '''simple docstring'''
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if F"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(F"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    '''simple docstring'''
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(F"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(F"{pytorch_dump_folder_path}/", exist_ok=True)
            open(F"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            # NOTE: the assignment targets of this key-renaming loop were lost in
            # this dump; the ".b" -> "bias" / ".w" -> "weight" mapping below is
            # restored from the upstream conversion script as an assumption.
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else F"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(F"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(F"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 0 |
import heapq
def greedy_min_vertex_cover(graph):
    '''simple docstring'''
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
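
# Example (added): for the sample graph used below, the greedy strategy
# repeatedly picks the remaining vertex of highest degree and returns
# {0, 1, 2, 4}.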
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 707 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma):
    '''simple docstring'''
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
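
# Example (added): an even size is bumped to the next odd value, so a request
# for a 10x10 kernel actually yields an 11x11 array:
# gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)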
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 658 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ (TestCase):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname, "realm_tokenizer")
os.makedirs(__a, exist_ok=__a)
_lowerCAmelCase : Dict = os.path.join(__a, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname, "realm_block_records")
os.makedirs(__a, exist_ok=__a)
    def get_tokenizer(self):
        '''simple docstring'''
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def get_config(self):
        '''simple docstring'''
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        '''simple docstring'''
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
})
return dataset
    def get_dummy_block_records(self):
        '''simple docstring'''
        block_records = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
            ], dtype=object)
return block_records
    def get_dummy_retriever(self):
        '''simple docstring'''
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer())
return retriever
    def test_retrieve(self):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np")

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]), ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"], )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]), ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"], )
    def test_block_has_answer(self):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np")

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        '''simple docstring'''
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], B"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

            self.assertEqual(retriever.block_records[0], B"This is the first record")
| 708 |
def binary_insertion_sort(collection):
    '''simple docstring'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
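
# Example (added):
# >>> binary_insertion_sort([5, 2, 4, 6, 1, 3])
# [1, 2, 3, 4, 5, 6]
# The binary search only reduces comparisons; the element-shifting loop keeps
# the overall worst case at O(n^2).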
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 658 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
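
# Example (added): the partition step compares every element in [start, end)
# against the pivot, so randomized quicksort over n = 100 elements performs
# roughly 2*n*ln(n), i.e. on the order of 900, comparisons on average.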
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution is :"
)
print(z)
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ (BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1E-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
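
# Usage sketch (added; the class is anonymized in this dump, so the name below
# simply refers to the config class defined above):
# config = UpperCAmelCase_(depths=[2, 2], hidden_sizes=[192, 384], out_features=["stage1"])
# config.stage_names == ["stem", "stage1", "stage2"]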
| 658 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
def combination_sum_iv(n, array, target):
    '''simple docstring'''

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n, array, target):
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n, array, target):
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
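
# All three variants agree on the count of ordered combinations; for example,
# with array = [1, 2, 5] and target = 5 there are 9 of them, so
# combination_sum_iv(3, [1, 2, 5], 5) == combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9.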
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 658 | 0 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ (SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
    def test_step_shape(self):
        '''simple docstring'''
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_clip_denoised(self):
        '''simple docstring'''
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7_614) < 1E-2
        assert abs(result_mean.item() - 0.2_510) < 1E-3
    def test_full_loop_with_timesteps(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6_357) < 1E-2
        assert abs(result_mean.item() - 0.4_527) < 1E-3
    def test_custom_timesteps_increasing_order(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 711 |
import string
def decrypt(message):
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}")
def main():
    '''simple docstring'''
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        '''simple docstring'''
        # NOTE: the digits in the attribute names below were collapsed in this
        # dump (e.g. num_bpa); they are restored from the upstream reference
        # implementation.
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
'''simple docstring'''
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
print(f"Model saved: {save_path}")
@classmethod
    def read_model(cls, model_path):
        '''simple docstring'''
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        '''simple docstring'''
        return 1 / (1 + np.exp(-1 * x))
    def do_round(self, x):
        '''simple docstring'''
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        '''simple docstring'''
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
def snake_case__ ( self, __a, __a, __a="average_pool"):
'''simple docstring'''
_lowerCAmelCase : str = len(featuremaps[0])
_lowerCAmelCase : int = int(size_map / size_pooling)
_lowerCAmelCase : List[Any] = []
for i_map in range(len(__a)):
_lowerCAmelCase : int = featuremaps[i_map]
_lowerCAmelCase : List[Any] = []
for i_focus in range(0, __a, __a):
for j_focus in range(0, __a, __a):
_lowerCAmelCase : Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__a))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__a))
_lowerCAmelCase : Union[str, Any] = np.asmatrix(__a).reshape(__a, __a)
featuremap_pooled.append(__a)
return featuremap_pooled
    def _expand(self, data):
        '''simple docstring'''
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded
    def _expand_mat(self, data_mat):
        '''simple docstring'''
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        '''simple docstring'''
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        '''simple docstring'''
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        '''simple docstring'''
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        '''simple docstring'''
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
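    # Minimal usage sketch (added; the hyper-parameters are illustrative
    # assumptions, not values from the original script): a bank of four 3x3
    # kernels with stride 1 and 2x2 average pooling, followed by a 16-8-4
    # fully connected head.
    cnn = CNN([3, 4, 1], 2, 16, 8, 4, rate_w=0.2, rate_t=0.2)
    conved, pooled = cnn.convolution(np.random.rand(8, 8))
    print("feature maps:", len(conved), "pooled map shape:", np.shape(pooled[0]))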
| 712 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
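
# The result maps scraped headings to their counters, e.g. (values elided,
# and the exact headings depend on the live page layout):
# {"Coronavirus Cases:": "...", "Deaths:": "...", "Recovered:": "...", ...}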
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == F"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    # NOTE: the exact mock wiring was lost in this dump; attaching the lambda
    # as a shuffle side effect is restored as an assumption.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree, coefficients):
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
else:
            coefficients = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a):
'''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution):
        '''simple docstring'''
        result: int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
    def derivative(self):
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant=0):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a):
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True
    def __ne__(self, polynomial_a):
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
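

# Quick demonstration (added): basic arithmetic and calculus on polynomials.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
    q = Polynomial(1, [0, 1])     # x
    print(p + q)                  # 3x^2 + 3x + 1
    print(p.evaluate(2))          # 17
    print(p.derivative())         # 6x + 2
    print(p.integral(0))          # 1.0x^3 + 1.0x^2 + 1.0x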
| 658 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    '''simple docstring'''
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    '''simple docstring'''
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(F"sequential.{sequential_layer}.", F"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(F"_projection.{projecton_layer}.", F"_projection.linear{transformers_projection_layer}.")

        # NOTE: `"audio" and "qkv" in key` evaluates as `bool("audio") and ("qkv" in key)`;
        # the "audio" literal is always truthy, so this effectively tests only for "qkv".
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
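
# Example (added): a checkpoint key such as "audio_branch.layers.0.attn.qkv.weight"
# is split into three entries ending in "attention.self.query/key/value.weight"
# under the renamed "audio_model.audio_encoder...." prefix.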
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    '''simple docstring'''
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    # NOTE: the assignment target on the next line was lost in this dump;
    # setting the audio tower's fusion flag is restored as an assumption.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ (PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=3_2000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.", FutureWarning, )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
    def max_position_embeddings(self):
        '''simple docstring'''
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
@max_position_embeddings.setter
    def max_position_embeddings(self, value):
        '''simple docstring'''
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
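# Illustrative examples for the key-renaming function above (invoked as replace_key below);
# the keys are hypothetical, not taken from a real checkpoint:
#   "encoders.0.level_blocks.0.k"      -> "encoders.0.level_blocks.0.codebook"
#   "priors.0.prime_state_ln.weight"   -> "priors.0.encoder.final_layer_norm.weight"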
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
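# Example invocation (script name and paths are hypothetical):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted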
| 715 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
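# e.g. hexagonal_numbers(5) -> [0, 1, 6, 15, 28], i.e. n * (2n - 1) for n = 0..4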
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 658 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase_ :
def __init__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = str(id_)
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Tuple = {} # {vertex:distance}
def __lt__( self, __a):
'''simple docstring'''
return self.key < other.key
def __repr__( self):
'''simple docstring'''
return self.id
def snake_case__ ( self, __a):
'''simple docstring'''
self.neighbors.append(__a)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = weight
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _lowerCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
for u in graph:
_lowerCAmelCase : List[Any] = math.inf
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = graph[:]
while q:
_lowerCAmelCase : Any = min(_lowerCamelCase )
q.remove(_lowerCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowerCAmelCase : Union[str, Any] = u
_lowerCAmelCase : str = u.edges[v.id]
for i in range(1 , len(_lowerCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for u in graph:
_lowerCAmelCase : str = math.inf
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Any = list(_lowerCamelCase )
hq.heapify(_lowerCamelCase )
while h:
_lowerCAmelCase : List[Any] = hq.heappop(_lowerCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowerCAmelCase : str = u
_lowerCAmelCase : Dict = u.edges[v.id]
hq.heapify(_lowerCamelCase )
for i in range(1 , len(_lowerCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def A ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
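# Minimal usage sketch (upstream names Vertex/connect/prim; the defs above are anonymized,
# and connect() expects 1-indexed vertex positions):
#   G = [Vertex(n) for n in range(3)]
#   connect(G, 1, 2, 15)
#   connect(G, 1, 3, 9)
#   prim(G, G[0])  # -> [(2, 1), (3, 1)]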
| 717 |
import baseaa
def A ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaaencode(string.encode("utf-8" ) )
def A ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'vivit'
def __init__( self, __a=224, __a=32, __a=[2, 16, 16], __a=3, __a=768, __a=12, __a=12, __a=3072, __a="gelu_fast", __a=0.0, __a=0.0, __a=0.02, __a=1E-06, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : str = num_frames
_lowerCAmelCase : Optional[Any] = tubelet_size
_lowerCAmelCase : Optional[int] = num_channels
_lowerCAmelCase : List[str] = qkv_bias
super().__init__(**__a)
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 658 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = hf_hub_url(repo_id=_lowerCamelCase , path=_lowerCamelCase , revision=_lowerCamelCase )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(_lowerCamelCase )}"
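# Worked example: hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"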
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
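# Example invocation (paths are hypothetical):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-converted --finetuning_task sts-b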
| 658 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'convnextv2'
def __init__( self, __a=3, __a=4, __a=4, __a=None, __a=None, __a="gelu", __a=0.02, __a=1E-12, __a=0.0, __a=224, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Dict = num_stages
_lowerCAmelCase : int = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_lowerCAmelCase : List[Any] = [3, 3, 9, 3] if depths is None else depths
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : int = drop_path_rate
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : str = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
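# e.g. with the default depths [3, 3, 9, 3], stage_names above evaluates to
#   ["stem", "stage1", "stage2", "stage3", "stage4"]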
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
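# Hypothetical usage sketch (upstream names load_vqgan / reconstruct_with_vqgan;
# the defs above are anonymized, and the paths are placeholders):
#   model = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml")
#   xrec = reconstruct_with_vqgan(x, model)  # x: image tensor in the model's input range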
| 658 | 0 |
from __future__ import annotations
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
    # If row is equal to the size of the board it means there is a queen in every
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all valid queen placements
for col in range(_lowerCamelCase ):
        # We apply what we learned previously. First we check that the candidate
        # column does not already appear in the current board (possible_board);
        # if it does, the two queens would collide vertically. Then we apply the
        # two diagonal formulas:
        #
        # 45º:  y - x = b  (i.e. row - col = b)
        # 135º: y + x = b  (i.e. row + col = b)
        #
        # and verify that their results are not already recorded in
        # diagonal_right_collisions and diagonal_left_collisions respectively.
        #
        # If any of these checks is True there is a collision, so we continue to
        # the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , )
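# Worked example of the diagonal checks above: with possible_board = [1, 3]
# (queens at (0, 1) and (1, 3)), the candidate (row=2, col=2) is rejected because
# row + col == 4 equals 1 + 3, which is already in diagonal_left_collisions.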
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : list[list[str]] = []
depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(_lowerCamelCase )
print("" )
print(len(_lowerCamelCase ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
| 658 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_snake_case = TypeVar("T")
class UpperCAmelCase_ ( Generic[T]):
def __init__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = data
_lowerCAmelCase : Node[T] | None = None
def __str__( self):
'''simple docstring'''
return f"{self.data}"
class UpperCAmelCase_ ( Generic[T]):
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Node[T] | None = None
def __iter__( self):
'''simple docstring'''
_lowerCAmelCase : str = self.top
while node:
yield node.data
_lowerCAmelCase : List[str] = node.next
def __str__( self):
'''simple docstring'''
return "->".join([str(__a) for item in self])
def __len__( self):
'''simple docstring'''
return len(tuple(iter(self)))
def snake_case__ ( self):
'''simple docstring'''
return self.top is None
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = Node(__a)
if not self.is_empty():
_lowerCAmelCase : int = self.top
_lowerCAmelCase : List[str] = node
def snake_case__ ( self):
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack")
assert isinstance(self.top, __a)
_lowerCAmelCase : List[Any] = self.top
_lowerCAmelCase : Union[str, Any] = self.top.next
return pop_node.data
def snake_case__ ( self):
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack")
assert self.top is not None
return self.top.data
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = None
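# e.g. (upstream class name Stack; the classes above are anonymized):
#   s = Stack(); s.push(1); s.push(2)
#   s.pop()   # -> 2
#   str(s)    # -> "1"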
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
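# e.g., with the upstream name mean: mean([3, 6, 9]) -> 6.0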
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
from collections import defaultdict
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = first_str.lower().strip()
_lowerCAmelCase : Union[str, Any] = second_str.lower().strip()
# Remove whitespace
_lowerCAmelCase : List[str] = first_str.replace(" " , "" )
_lowerCAmelCase : int = second_str.replace(" " , "" )
# Strings of different lengths are not anagrams
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
return False
# Default values for count should be 0
_lowerCAmelCase : defaultdict[str, int] = defaultdict(_lowerCamelCase )
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second
for i in range(len(_lowerCamelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
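# e.g. check_anagrams("Silent", "Listen") -> True   (comparison is case- and space-insensitive)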
if __name__ == "__main__":
from doctest import testmod
testmod()
_snake_case = input("Enter the first string ").strip()
_snake_case = input("Enter the second string ").strip()
_snake_case = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 702 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A ( *_lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=True , _lowerCamelCase=2 ):
'''simple docstring'''
from .. import __version__
_lowerCAmelCase : str = take_from
_lowerCAmelCase : int = ()
if not isinstance(args[0] , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
_lowerCAmelCase : Optional[int] = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
_lowerCAmelCase : Optional[Any] = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
_lowerCAmelCase : Tuple = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
_lowerCAmelCase : Union[str, Any] = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
_lowerCAmelCase : str = warning + " " if standard_warn else ""
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
_lowerCAmelCase : List[Any] = inspect.getouterframes(inspect.currentframe() )[1]
_lowerCAmelCase : Dict = call_frame.filename
_lowerCAmelCase : str = call_frame.lineno
_lowerCAmelCase : Union[str, Any] = call_frame.function
_lowerCAmelCase : Optional[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
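# Hypothetical usage sketch (upstream name deprecate, from diffusers):
#   kwargs = {"scale": 2.0}
#   scale = deprecate("scale", "1.0.0", "Use `s` instead.", take_from=kwargs)
#   # warns, pops "scale" out of kwargs, and returns 2.0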
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
        # Take all the columns in reverse order (-1), then keep only the first `dimensions` columns
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
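# Note: the function above maximizes between-class scatter relative to within-class
# scatter via the generalized eigenproblem eigh(S_between, S_within), then projects
# the data onto the leading `dimensions` directions obtained from the SVD step.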
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_snake_case = logging.getLogger(__name__)
_snake_case = "pytorch_model.bin"
@dataclasses.dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'})
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
lowerCamelCase__ = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'A csv or a json file containing the validation data.'})
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'The name of the task to train on.'} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'The list of labels for the task.'})
@dataclasses.dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
lowerCamelCase__ = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'})
lowerCamelCase__ = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
lowerCamelCase__ = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCamelCase__ = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
lowerCamelCase__ = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
lowerCamelCase__ = dataclasses.field(
default=100 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Random seed for initialization.'} , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_lowerCAmelCase : Union[str, Any] = dataset.filter(lambda _lowerCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_lowerCAmelCase : Optional[Any] = int(eval_result * len(_lowerCamelCase ) )
print(_lowerCamelCase )
_lowerCAmelCase : str = dataset.sort("probability" , reverse=_lowerCamelCase )
_lowerCAmelCase : Tuple = dataset.select(range(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = dataset.remove_columns(["label", "probability"] )
_lowerCAmelCase : Union[str, Any] = dataset.rename_column("prediction" , "label" )
_lowerCAmelCase : List[str] = dataset.map(lambda _lowerCamelCase : {"label": idalabel[example["label"]]} )
_lowerCAmelCase : List[Any] = dataset.shuffle(seed=args.seed )
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase , F"train_pseudo.{args.data_file_extension}" )
if args.data_file_extension == "csv":
dataset.to_csv(_lowerCamelCase , index=_lowerCamelCase )
else:
dataset.to_json(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = STModelArguments(model_name_or_path=_lowerCamelCase )
_lowerCAmelCase : Any = STDataArguments(train_file=_lowerCamelCase , infer_file=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = STTrainingArguments(output_dir=_lowerCamelCase )
_lowerCAmelCase : List[Any] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_lowerCamelCase ).items():
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for key, value in kwargs.items():
if hasattr(_lowerCamelCase , _lowerCamelCase ):
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Sanity checks
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : int = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_lowerCAmelCase : str = args.train_file
_lowerCAmelCase : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_lowerCAmelCase : Optional[int] = args.eval_file
for key in data_files:
_lowerCAmelCase : List[Any] = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
_lowerCAmelCase : List[str] = extension
else:
assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
_lowerCAmelCase : Union[str, Any] = F"{args.output_dir}/self-train_iter-{{}}".format
_lowerCAmelCase : Dict = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Any = False
# Show the progress bar
_lowerCAmelCase : str = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_lowerCAmelCase : Union[str, Any] = data_dir_format(_lowerCamelCase )
assert os.path.exists(_lowerCamelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , "stage-1" )
_lowerCAmelCase : List[Any] = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_lowerCamelCase , _lowerCamelCase ):
arguments_dict.update({key: value} )
_lowerCAmelCase : Optional[Any] = os.path.join(_lowerCamelCase , "best-checkpoint" , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , _lowerCamelCase , _lowerCamelCase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , _lowerCamelCase )
finetune(**_lowerCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(_lowerCamelCase )
logger.info("Self-training job completed: iteration: %d, stage: 1." , _lowerCamelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase , "best-checkpoint" )
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , "stage-2" )
# Update arguments_dict
_lowerCAmelCase : str = model_path
_lowerCAmelCase : Optional[Any] = data_files["train"]
_lowerCAmelCase : Union[str, Any] = current_output_dir
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , "best-checkpoint" , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , _lowerCamelCase , _lowerCamelCase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , _lowerCamelCase )
finetune(**_lowerCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(_lowerCamelCase )
logger.info("Self-training job completed: iteration: %d, stage: 2." , _lowerCamelCase )
_lowerCAmelCase : int = iteration
_lowerCAmelCase : Optional[Any] = data_dir_format(iteration + 1 )
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(os.path.join(_lowerCamelCase , "best-checkpoint" ) )
_lowerCAmelCase : Any = config.idalabel
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , "eval_results_best-checkpoint.json" )
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase , "test_results_best-checkpoint.json" )
assert os.path.exists(_lowerCamelCase )
with open(_lowerCamelCase , "r" ) as f:
_lowerCAmelCase : Dict = float(json.load(_lowerCamelCase )[args.eval_metric] )
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , "infer_output_best-checkpoint.csv" )
assert os.path.exists(_lowerCamelCase )
# Loading the dataset from local csv or json files.
_lowerCAmelCase : Union[str, Any] = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
_lowerCAmelCase : Dict = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
shutil.copy(_lowerCamelCase , os.path.join(_lowerCamelCase , F"eval_results_iter-{iteration}.json" ) )
if os.path.exists(_lowerCamelCase ):
shutil.copy(_lowerCamelCase , os.path.join(_lowerCamelCase , F"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase : List[Any] = os.path.join(_lowerCamelCase , F"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_lowerCAmelCase : Optional[Any] = eval_result
if best_iteration is None:
_lowerCAmelCase : Tuple = new_iteration
_lowerCAmelCase : Union[str, Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_lowerCAmelCase : Dict = new_iteration
_lowerCAmelCase : str = new_eval_result
_lowerCAmelCase : Union[str, Any] = 0
else:
if new_eval_result == best_eval_result:
_lowerCAmelCase : Any = new_iteration
_lowerCAmelCase : Union[str, Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_lowerCAmelCase : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , _lowerCamelCase )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowerCamelCase , F"eval_results_iter-{iteration}.json" ) , os.path.join(_lowerCamelCase , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowerCamelCase , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(_lowerCamelCase , "eval_results_best-iteration.json" ) , )
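# Added minimal sketch of the early-stopping rule implemented above (an
# assumption-labelled illustration, not part of the original script: patience
# resets only when the metric improves by more than the threshold).
def _early_stopping_should_stop(scores, patience, threshold):
    best = scores[0]
    counter = 0
    for score in scores[1:]:
        if score - best > threshold:
            best = score
            counter = 0
        else:
            counter += 1
            if counter >= patience:
                return True
    return False


assert _early_stopping_should_stop([0.70, 0.71, 0.71, 0.71], patience=2, threshold=0.01) is True
assert _early_stopping_should_stop([0.70, 0.75, 0.80], patience=2, threshold=0.01) is False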
| 704 |
import requests
from bs4 import BeautifulSoup
def get_citation ( base_url , params ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , "html.parser" )
    div = soup.find("div" , attrs={"class": "gs_ri"} )
    anchors = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 0 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid ( _outputs ):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax ( _outputs ):
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class ClassificationFunction ( ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'\n    return_all_scores (`bool`, *optional*, defaults to `False`):\n        Whether to return all prediction scores or just the one of the predicted class.\n    function_to_apply (`str`, *optional*, defaults to `"default"`):\n        The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n        - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n          has several labels, will apply the softmax function on the output.\n        - `"sigmoid"`: Applies the sigmoid function on the output.\n        - `"softmax"`: Applies the softmax function on the output.\n        - `"none"`: Does not apply any function on the output.\n    ' , )
class UpperCAmelCase_ ( Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self, **__a):
'''simple docstring'''
super().__init__(**__a)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters( self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        '''simple docstring'''
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self, *args, **kwargs):
        '''simple docstring'''
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
    def preprocess( self, inputs, **tokenizer_kwargs):
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.")
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward( self, model_inputs):
        '''simple docstring'''
        return self.model(**model_inputs)
    def postprocess( self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply"):
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
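if __name__ == "__main__":
    # Added usage sketch: the public `pipeline` factory is the normal entry point
    # for this class. The checkpoint name is only an illustration; any sequence
    # classification model works (network access assumed).
    from transformers import pipeline

    classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
    print(classifier("This movie was great!"))  # e.g. [{'label': 'POSITIVE', 'score': 0.99...}]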
| 705 |
def solution ( limit = 1_000_000 ):
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , limit ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
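# Added unmemoized helper for spot checks (illustrative, not part of the
# original solution): the chain for 13 is 13, 40, 20, 10, 5, 16, 8, 4, 2, 1.
def collatz_chain_length(start):
    length = 1
    while start != 1:
        start = 3 * start + 1 if start % 2 else start // 2
        length += 1
    return length


assert collatz_chain_length(13) == 10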
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 0 |
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln ( nums , max_sum ):
    '''simple docstring'''
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result


def create_state_space_tree ( nums , max_sum , num_index , path , result , remaining_nums_sum , ):
    '''simple docstring'''
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
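# Added miniature check: for [1, 2, 3] with target 3 the only qualifying
# subsets are [1, 2] and [3].
_demo = generate_sum_of_subsets_soln([1, 2, 3], 3)
assert sorted(map(sorted, _demo)) == [[1, 2], [3]]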
| 706 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key ( key ):
    '''simple docstring'''
    if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.bias" , ".conv1d_1.bias" )
    elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.weight" , ".conv1d_1.weight" )
    elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.bias" , ".conv1d_2.bias" )
    elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.weight" , ".conv1d_2.weight" )

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
    if "prime_prior" in key:
        key = key.replace("prime_prior" , "encoder" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb." , "." )
    if key.endswith("k" ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k" , ".codebook" )
    if "y_emb." in key:
        return key.replace("y_emb." , "metadata_embedding." )
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb" , "embed_tokens" )
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
    if ".ln" in key:
        return key.replace(".ln" , ".layer_norm" )
    if "_ln" in key:
        return key.replace("_ln" , "_layer_norm" )
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj" , "encoder.proj_in" )
    if "prime_x_out" in key:
        return key.replace("prime_x_out" , "encoder.lm_head" )
    if "prior.x_out" in key:
        return key.replace("x_out" , "fc_proj_out" )
    if "x_emb" in key:
        return key.replace("x_emb" , "embed_tokens" )
    return key
def fix_jukebox_keys ( state_dict , model_state_dict , key_prefix , mapping ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle missmatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def convert_openai_checkpoint ( model_name=None , pytorch_dump_folder_path=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(F"{PREFIX}{file}" , allow_redirects=True )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel ( ksize , sigma , theta , lambd , gamma , psi ):
    '''simple docstring'''
    # Ensure an odd kernel size so the filter has a well-defined center
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )

    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor
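# Added shape check: an even ksize is bumped to the next odd value, so a
# requested 10x10 kernel actually comes back 11x11.
assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)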
if __name__ == "__main__":
import doctest
doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 658 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_snake_case = 10
def lin_search ( left , right , array , target ):
    '''simple docstring'''
    for i in range(left , right ):
if array[i] == target:
return i
return -1
def ite_ternary_search ( array , target ):
    '''simple docstring'''
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search ( left , right , array , target ):
    '''simple docstring'''
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
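# Added spot check: both variants locate 7 at index 3 in a small sorted list.
_demo = [1, 3, 5, 7, 9, 11]
assert ite_ternary_search(_demo, 7) == 3
assert rec_ternary_search(0, len(_demo) - 1, _demo, 7) == 3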
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f'''Iterative search: {target} found at positions: {resulta}''')
        print(f'''Recursive search: {target} found at positions: {resultb}''')
else:
print("Not found")
| 708 |
def binary_insertion_sort ( collection ):
    '''simple docstring'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
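# Added sanity checks (the binary search only narrows the insertion point; the
# overall behaviour matches a plain insertion sort).
assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert binary_insertion_sort([]) == []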
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 658 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt"}
_snake_case = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
_snake_case = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
_snake_case = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self, token_ids_a, token_ids_b=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences( self, token_ids_a, token_ids_b = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary( self, save_directory, filename_prefix = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
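# Added usage sketch (network-dependent, so left as comments; the checkpoint is
# one of the real entries in the maps above):
#   tokenizer = UpperCAmelCase_.from_pretrained("YituTech/conv-bert-base")
#   encoding = tokenizer("hello world")
#   # encoding["input_ids"] starts with the [CLS] id and ends with the [SEP] id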
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( BackboneConfigMixin , PretrainedConfig):
    model_type = 'focalnet'
    def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1E-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
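if __name__ == "__main__":
    # Added usage sketch: the default config derives one stage name per entry
    # in `depths`, plus the stem.
    config = UpperCAmelCase_()
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]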
| 658 | 0 |
def binomial_coefficient ( n , k ):
    '''simple docstring'''
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result


def catalan_number ( node_count ):
    '''simple docstring'''
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)


def factorial ( n ):
    '''simple docstring'''
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result


def binary_tree_count ( node_count ):
    '''simple docstring'''
    return catalan_number(node_count ) * factorial(node_count )
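# Added cross-checks against well-known values: C(4,2) = 6, Catalan(3) = 5, and
# 3 labeled nodes give 5 * 3! = 30 distinct binary trees.
assert binomial_coefficient(4, 2) == 6
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30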
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 710 |
def combination_sum_iv ( n , array , target ):
    '''simple docstring'''

    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )

    return count_of_possible_combinations(target )


def combination_sum_iv_dp_array ( n , array , target ):
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )


def combination_sum_iv_bottom_up ( n , array , target ):
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
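# Added reference check (the classic example for this problem): nums [1, 2, 3]
# and target 4 admit 7 ordered combinations, and all three variants agree.
assert combination_sum_iv(3, [1, 2, 3], 4) == 7
assert combination_sum_iv_dp_array(3, [1, 2, 3], 4) == 7
assert combination_sum_iv_bottom_up(3, [1, 2, 3], 4) == 7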
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 658 | 0 |
| 711 |
import string
def decrypt ( message ):
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main ( ):
    '''simple docstring'''
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats ( url = "https://www.worldometers.info/coronavirus" ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    keys = soup.findAll("h1" )
    values = soup.findAll("div" , {"class": "maincounter-number"} )
    keys += soup.findAll("span" , {"class": "panel-title"} )
    values += soup.findAll("div" , {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
from __future__ import annotations
def bucket_sort ( my_list ):
    '''simple docstring'''
    if len(my_list ) == 0:
        return []
    min_value, max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]

    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
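# Added spot check with duplicates, mirroring the asserts in the guard below.
assert bucket_sort([3, 1, 2, 1]) == [1, 1, 2, 3]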
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    def __init__( self, degree, coefficients):
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__( self, polynomial_a):
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__( self, polynomial_a):
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__( self):
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__( self, polynomial_a):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate( self, substitution):
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self):
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
    def derivative( self):
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral( self, constant = 0):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__( self, polynomial_a):
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__( self, polynomial_a):
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
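# Added usage sketch for the class above: arithmetic, printing, evaluation,
# and a derivative.
p = Polynomial(1, [1, 1])      # 1 + 1x
q = Polynomial(2, [0, 0, 1])   # 1x^2
assert str(p + q) == "1x^2 + 1x + 1"
assert (p + q).evaluate(2) == 7
assert str(q.derivative()) == "2x"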
| 658 | 0 |
from __future__ import annotations
def extended_euclid ( a , b ):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem ( n_a , r_a , n_b , r_b ):
    '''simple docstring'''
    (x, y) = extended_euclid(n_a , n_b )
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m


def invert_modulo ( a , n ):
    '''simple docstring'''
    (b, x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2 ( n_a , r_a , n_b , r_b ):
    '''simple docstring'''
    x, y = invert_modulo(n_a , n_b ), invert_modulo(n_b , n_a )
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m
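# Added worked example: x ≡ 1 (mod 5) and x ≡ 3 (mod 7) has the unique solution
# 31 modulo 35, and both implementations agree.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31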
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig):
    model_type = 'xlnet'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size', # Backward compatibility
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self, vocab_size=32_000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1E-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.", FutureWarning, )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings( self):
        '''simple docstring'''
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self, value):
        '''simple docstring'''
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def rename_keys ( state_dict , encoder_only=False ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
_lowerCAmelCase : str = "segformer.encoder." + key
if key.startswith("backbone" ):
_lowerCAmelCase : Tuple = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCAmelCase : List[Any] = key[key.find("patch_embed" ) + len("patch_embed" )]
_lowerCAmelCase : Optional[int] = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(_lowerCamelCase )-1}" )
if "norm" in key:
_lowerCAmelCase : Dict = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCAmelCase : Tuple = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
_lowerCAmelCase : str = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(_lowerCamelCase )-1}" )
if "layer_norm1" in key:
_lowerCAmelCase : List[str] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
_lowerCAmelCase : Dict = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_lowerCAmelCase : Any = key[key.find("block" ) + len("block" )]
_lowerCAmelCase : Dict = key.replace(F"block{idx}" , F"block.{int(_lowerCamelCase )-1}" )
if "attn.q" in key:
_lowerCAmelCase : Union[str, Any] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
_lowerCAmelCase : Any = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
_lowerCAmelCase : Optional[int] = key.replace("attn" , "attention.self" )
if "fc1" in key:
_lowerCAmelCase : List[str] = key.replace("fc1" , "dense1" )
if "fc2" in key:
_lowerCAmelCase : Any = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
_lowerCAmelCase : Optional[Any] = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
_lowerCAmelCase : List[Any] = key.replace("linear_fuse.conv" , "linear_fuse" )
_lowerCAmelCase : Union[str, Any] = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCAmelCase : List[str] = key[key.find("linear_c" ) + len("linear_c" )]
_lowerCAmelCase : Optional[Any] = key.replace(F"linear_c{idx}" , F"linear_c.{int(_lowerCamelCase )-1}" )
if key.startswith("head" ):
_lowerCAmelCase : Union[str, Any] = key.replace("head" , "classifier" )
_lowerCAmelCase : int = value
return new_state_dict
def read_in_k_v ( state_dict , config ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCAmelCase : Dict = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
_lowerCAmelCase : Union[str, Any] = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
_lowerCAmelCase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCAmelCase : Tuple = kv_bias[: config.hidden_sizes[i]]
_lowerCAmelCase : Tuple = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCAmelCase : Tuple = kv_bias[
config.hidden_sizes[i] :
]
def prepare_img ( ):
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SegformerConfig()
_lowerCAmelCase : Optional[Any] = False
# set attributes based on model_name
_lowerCAmelCase : Tuple = "huggingface/label-files"
if "segformer" in model_name:
_lowerCAmelCase : Dict = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
_lowerCAmelCase : Tuple = 150
_lowerCAmelCase : int = "ade20k-id2label.json"
_lowerCAmelCase : Optional[int] = (1, 150, 128, 128)
elif "city" in model_name:
_lowerCAmelCase : Union[str, Any] = 19
_lowerCAmelCase : Any = "cityscapes-id2label.json"
_lowerCAmelCase : List[str] = (1, 19, 128, 128)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
_lowerCAmelCase : str = True
_lowerCAmelCase : List[Any] = model_name[4:6]
_lowerCAmelCase : Optional[Any] = 1_000
_lowerCAmelCase : int = "imagenet-1k-id2label.json"
_lowerCAmelCase : List[str] = (1, 1_000)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
_lowerCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : Any = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : str = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_lowerCAmelCase : str = [64, 128, 320, 512]
_lowerCAmelCase : Optional[Any] = 256
elif size == "b2":
_lowerCAmelCase : str = [64, 128, 320, 512]
_lowerCAmelCase : Optional[Any] = 768
_lowerCAmelCase : Optional[Any] = [3, 4, 6, 3]
elif size == "b3":
_lowerCAmelCase : Tuple = [64, 128, 320, 512]
_lowerCAmelCase : List[Any] = 768
_lowerCAmelCase : Optional[Any] = [3, 4, 18, 3]
elif size == "b4":
_lowerCAmelCase : List[str] = [64, 128, 320, 512]
_lowerCAmelCase : Union[str, Any] = 768
_lowerCAmelCase : Tuple = [3, 8, 27, 3]
elif size == "b5":
_lowerCAmelCase : int = [64, 128, 320, 512]
_lowerCAmelCase : Dict = 768
_lowerCAmelCase : Optional[Any] = [3, 6, 40, 3]
else:
raise ValueError(F"Size {size} not supported" )
# load image processor (only resize + normalize)
_lowerCAmelCase : Any = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
# prepare image
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : Tuple = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
_lowerCAmelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )
else:
_lowerCAmelCase : List[Any] = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
_lowerCAmelCase : Any = rename_keys(_lowerCamelCase , encoder_only=_lowerCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_lowerCamelCase , _lowerCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
_lowerCAmelCase : Any = False
_lowerCAmelCase : List[str] = SegformerForImageClassification(_lowerCamelCase )
else:
_lowerCAmelCase : Tuple = SegformerForSemanticSegmentation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# forward pass
_lowerCAmelCase : List[str] = model(_lowerCamelCase )
_lowerCAmelCase : str = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_lowerCAmelCase : List[str] = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
    else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.id2label[predicted_class_idx] )
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
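# Example invocation (editor's note; the checkpoint path is a placeholder for
# an original mmsegmentation SegFormer weight file):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0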
| 715 |
def price_plus_tax(price: float , tax_rate: float ) -> float:
    '''Return the price with the given tax rate applied.'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
from __future__ import annotations
from random import random
class Node:
    def __init__( self, value: int | None = None):
        '''A treap node holding a value and a random heap priority.'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__( self):
        '''Pretty-print the subtree rooted at this node.'''
        from pprint import pformat
        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)
    def __str__( self):
        '''In-order string representation of the subtree.'''
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None , value: int ) -> tuple[Node | None, Node | None]:
    '''Split the treap into nodes <= value (left part) and nodes > value (right part).'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge(left: Node | None , right: Node | None ) -> Node | None:
    '''Merge two treaps, assuming every value in left is <= every value in right.'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert(root: Node | None , value: int ) -> Node | None:
    '''Insert value by splitting at value and merging the new node in between.'''
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def erase(root: Node | None , value: int ) -> Node | None:
    '''Erase all nodes equal to value by splitting them out and re-merging.'''
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def inorder(root: Node | None ) -> None:
    '''Print the treap values in sorted (in-order) order.'''
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end="," )
        inorder(root.right )
def interact_treap(root: Node | None , args: str ) -> Node | None:
    '''Apply a sequence of "+value" / "-value" commands to the treap.'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def main() -> None:
    '''Interactive treap shell: read commands until "q" is entered.'''
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("good by!" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
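# Usage sketch (editor's addition, not part of the original module): exercises
# the treap helpers above without the interactive loop; the values and the
# expected output are illustrative.
def _demo_treap() -> None:
    root = None
    for value in (5, 3, 8, 1, 4):
        root = insert(root, value)
    inorder(root)  # prints: 1,3,4,5,8,
    print()
    root = erase(root, 3)
    inorder(root)  # prints: 1,4,5,8,
    print()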
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = 'upernet'
    def __init__( self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        '''Configuration for the UperNet semantic-segmentation model.'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self):
        '''Serialize this config, expanding the nested backbone config.'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
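# Minimal usage sketch (editor's note; assumes CONFIG_MAPPING provides a
# "resnet" entry, as imported above):
# config = UperNetConfig()                            # default ResNet backbone
# config.to_dict()["backbone_config"]["model_type"]   # -> "resnet"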
| 658 | 0 |
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    '''Count n-digit positive integers that are also an n-th power (Project Euler 63).'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
    print(f'''{solution(10, 22) = }''')
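# Worked example (editor's note): 7**5 = 16807 has exactly 5 digits, so it is
# counted, while 10**2 = 100 has 3 digits -- no base >= 10 can ever qualify,
# which is why the default search stops at base 9 (range(1, 10)).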
| 717 |
import base64
def base85_encode(string: str ) -> bytes:
    '''Encode a UTF-8 string with Ascii85 (base85).'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode(a85encoded: bytes ) -> str:
    '''Decode Ascii85 bytes back to a UTF-8 string.'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
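# Round-trip sketch (editor's addition, not in the original module): Ascii85
# packs 4 bytes into 5 printable characters, and decode inverts encode.
assert base85_decode(base85_encode("hello, world")) == "hello, world"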
| 658 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
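# Editor's note: assigning `sys.modules[__name__] = _LazyModule(...)` replaces
# this module with a proxy, so the torch-heavy modeling code listed in
# `_import_structure` is only imported on first attribute access
# (e.g. `from ...van import VanModel`).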
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = 'data2vec-vision'
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        '''Configuration for Data2Vec-Vision models.'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs( self):
        '''Expected inputs for ONNX export.'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation( self):
        '''Absolute tolerance used when validating the ONNX export.'''
        return 1E-4
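# Minimal usage sketch (editor's note; values are illustrative):
# config = Data2VecVisionConfig(image_size=384)
# onnx_config = Data2VecVisionOnnxConfig(config)
# list(onnx_config.inputs)         # -> ["pixel_values"]
# onnx_config.atol_for_validation  # -> 1e-4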
| 658 | 0 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str ) -> list[str]:
    '''Return all cyclic rotations of the input string.'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform(s: str ) -> BWTTransformDict:
    '''Apply the Burrows-Wheeler transform, remembering the original row index.'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt(bwt_string: str , idx_original_string: int ) -> str:
    '''Invert the Burrows-Wheeler transform given the original row index.'''
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f'''Burrows Wheeler transform for string \'{s}\' results '''
        f'''in \'{result["bwt_string"]}\''''
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
        f'''we get original string \'{original_string}\''''
    )
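# Worked example (editor's note): bwt_transform("^BANANA") sorts the 7
# rotations and keeps their last column, giving bwt_string == "BNN^AAA" with
# idx_original_string == 6; reverse_bwt("BNN^AAA", 6) recovers "^BANANA".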
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''Convert a TensorFlow XLNet checkpoint into a PyTorch model directory.'''
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
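# Example invocation (editor's note; the paths are placeholders for a
# downloaded TF XLNet checkpoint):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pt \
#       --finetuning_task sts-b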
| 658 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__( self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, field=None, num_proc=None, **kwargs, ):
        '''Reader that builds a Dataset (or IterableDataset) from JSON files.'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read( self):
        '''Build and return the dataset (streaming or fully materialized).'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__( self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs, ):
        '''Writer that serializes a Dataset to JSON, batch by batch.'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write( self):
        '''Write the dataset as JSON and return the number of bytes written.'''
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json( self, args):
        '''Convert one batch of rows to a JSON-encoded bytes chunk.'''
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write( self, file_obj, orient, lines, index, **to_json_kwargs, ):
        '''Stream batches to file_obj, optionally through a multiprocessing pool.'''
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                    written += file_obj.write(json_str)
        return written
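# Minimal usage sketch (editor's note; the dataset content is illustrative):
# from datasets import Dataset
# ds = Dataset.from_dict({"text": ["a", "b"]})
# JsonDatasetWriter(ds, "out.jsonl").write()   # records orient -> JSON Lines
# JsonDatasetReader("out.jsonl").read()        # round-trips the rows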
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    'bleu': bleu score,\n    'precisions': geometric mean of n-gram precisions,\n    'brevity_penalty': brevity penalty,\n    'length_ratio': ratio of lengths,\n    'translation_length': translation_length,\n    'reference_length': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],  # tokenized prediction of the first sample\n    ...     [\"foo\", \"bar\", \"foobar\"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)\n    ...     [[\"foo\", \"bar\", \"foobar\"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric(\"bleu\")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results[\"bleu\"])\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info( self):
        '''Metric metadata: expected features, citation and reference URLs.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
                }), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ], )
    def _compute( self, predictions, references, max_order=4, smooth=False):
        '''Compute corpus BLEU for tokenized predictions against references.'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 658 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__( self, *args, **kwargs):
        '''Causal language-model text-generation pipeline.'''
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs, ):
        '''Split user kwargs into preprocess, forward and postprocess parameters.'''
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self, *args, **kwargs):
        '''Transfo-XL needs an extra tokenizer flag; defer to the parent otherwise.'''
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__( self, text_inputs, **kwargs):
        '''Generate continuations for one prompt or a batch of prompts.'''
        return super().__call__(text_inputs, **kwargs)
    def preprocess( self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        '''Tokenize the prompt, optionally truncating it for long generations.'''
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs["prompt_text"] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length")
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward( self, model_inputs, **generate_kwargs):
        '''Run generation and reshape the output to (batch, num_return_sequences, ...).'''
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        '''Decode generated token ids into text records (or return raw ids).'''
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ))
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
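# Minimal usage sketch (editor's note; the model choice is illustrative):
# from transformers import pipeline
# generator = pipeline("text-generation", model="gpt2")
# generator("Hello, I'm a language model,", max_new_tokens=20)
# # -> [{"generated_text": "Hello, I'm a language model, ..."}]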
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path , display=False ):
    '''Load an OmegaConf YAML config, optionally printing it.'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan(device , conf_path=None , ckpt_path=None ):
    '''Instantiate a VQModel from a config and load checkpoint weights.'''
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan(x , model ):
    '''Encode an image batch to the VQGAN latent space and decode it back.'''
    z, _, _ = model.encode(x )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str(string , reload=False ):
    '''Resolve a dotted path like "pkg.module.Class" to the named object.'''
    module, cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config(config ):
    '''Instantiate config["target"] with config["params"] as keyword arguments.'''
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config(config , sd , gpu=True , eval_mode=True ):
    '''Build a model from config and (optionally) load a state dict.'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config , ckpt , gpu , eval_mode ):
    '''Load a Lightning-style checkpoint and return (model, global_step).'''
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F"loaded model from global step {global_step}." )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
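# Usage sketch (editor's note; paths are placeholders and assume a
# taming-transformers style VQGAN checkpoint):
# model = load_vqgan(torch.device("cpu"), conf_path="vqgan.yaml", ckpt_path="vqgan.ckpt")
# xrec = reconstruct_with_vqgan(images, model)  # images: (B, 3, H, W) tensor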
| 658 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    '''Pick two random sorted hands and the expected comparison result.'''
    play, oppo = randrange(len(SORTED_HANDS )), randrange(len(SORTED_HANDS ))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100 ):
    '''Yield `number_of_hands` random (hand, other, expected) triples.'''
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values(hand , expected ):
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    '''Project Euler 54: count the hands player one wins in poker_hands.txt.'''
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
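# Editor's note: run these parametrized tests with `pytest` from the directory
# containing sol1.py and poker_hands.txt; test_euler_project asserts the
# Project Euler 54 answer of 376 wins for player one.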
| 659 |
"""simple docstring"""
def combination_sum_iv(n: int , array: list[int] , target: int ) -> int:
    '''Count ordered ways to reach `target` from `array` values (naive recursion).'''
    def count_of_possible_combinations(target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array(n: int , array: list[int] , target: int ) -> int:
    '''Same count, with top-down memoisation in `dp_array`.'''
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n: int , array: list[int] , target: int ) -> int:
    '''Same count, built bottom-up over all sub-targets.'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
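# Editor's note: the three variants trade time for memory. The naive recursion
# is exponential; the memoised and bottom-up versions both run in
# O(target * len(array)) time with O(target) extra space. For array=[1, 2, 5]
# and target=5, all three return 9 ordered combinations (1+1+1+1+1, 1+2+2,
# 2+2+1, ..., 5).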
| 659 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name ):
    '''Build a DetaConfig (Swin-large backbone) with the right label set.'''
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config ):
    '''List (source, target) state-dict key pairs for the Swin backbone and transformer.'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def snake_case ( _a: Tuple , _a: Optional[int] , _a: Optional[Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = dct.pop(_a )
lowerCamelCase__ = val
def snake_case ( _a: List[str] , _a: Optional[int] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCamelCase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
lowerCamelCase__ = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
            lowerCamelCase__ = in_proj_weight[:dim, :]
            lowerCamelCase__ = in_proj_bias[:dim]
            lowerCamelCase__ = in_proj_weight[dim : dim * 2, :]
            lowerCamelCase__ = in_proj_bias[dim : dim * 2]
            lowerCamelCase__ = in_proj_weight[-dim:, :]
            lowerCamelCase__ = in_proj_bias[-dim:]
# fmt: on
def snake_case ( _a: Union[str, Any] , _a: List[str] )-> int:
'''simple docstring'''
lowerCamelCase__ = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
lowerCamelCase__ = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
lowerCamelCase__ = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[:hidden_size, :]
lowerCamelCase__ = in_proj_bias[:hidden_size]
        lowerCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase__ = in_proj_weight[-hidden_size:, :]
lowerCamelCase__ = in_proj_bias[-hidden_size:]
def snake_case ( )-> Dict:
'''simple docstring'''
lowerCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def snake_case ( _a: Optional[Any] , _a: int , _a: int )-> Any:
'''simple docstring'''
lowerCamelCase__ = get_deta_config(_a )
# load original state dict
if model_name == "deta-swin-large":
lowerCamelCase__ = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
lowerCamelCase__ = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F'Model name {model_name} not supported' )
lowerCamelCase__ = torch.load(_a , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(_a , param.shape )
# rename keys
lowerCamelCase__ = create_rename_keys(_a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
read_in_swin_q_k_v(_a , config.backbone_config )
read_in_decoder_q_k_v(_a , _a )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
lowerCamelCase__ = state_dict.pop(_a )
lowerCamelCase__ = val
if "input_proj" in key:
lowerCamelCase__ = state_dict.pop(_a )
lowerCamelCase__ = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
lowerCamelCase__ = state_dict.pop(_a )
lowerCamelCase__ = val
# finally, create HuggingFace model and load state dict
lowerCamelCase__ = DetaForObjectDetection(_a )
model.load_state_dict(_a )
model.eval()
lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(_a )
# load image processor
lowerCamelCase__ = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = processor(images=_a , return_tensors='pt' )
lowerCamelCase__ = encoding['pixel_values']
lowerCamelCase__ = model(pixel_values.to(_a ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
lowerCamelCase__ = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
lowerCamelCase__ = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
lowerCamelCase__ = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
lowerCamelCase__ = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_a ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_a ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(_a ).mkdir(exist_ok=_a )
model.save_pretrained(_a )
processor.save_pretrained(_a )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
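# Usage sketch (hypothetical file name -- the argument names come from the
# parser above):
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large-converted \
#       --push_to_hub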
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
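# Usage sketch: thanks to _LazyModule, the heavy torch imports above are only
# resolved on first attribute access. A typical translation round-trip, as
# documented upstream (checkpoint name illustrative):
#
#   from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   tokenizer.src_lang = "fr"
#   encoded = tokenizer("La vie est belle.", return_tensors="pt")
#   generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("en"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))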
| 659 | 1 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = "▁"
_snake_case = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
_snake_case = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
_snake_case = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
_snake_case = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
_snake_case = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = ["input_ids"]
a_ : Tuple = VOCAB_FILES_NAMES
a_ : List[Any] = PRETRAINED_INIT_CONFIGURATION
a_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = RESOURCE_FILES_NAMES
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Tuple="utf8" , SCREAMING_SNAKE_CASE__ : Optional[int]="[UNK]" , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : str="[CLS]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Any , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , vocab_file=SCREAMING_SNAKE_CASE__ , encoding=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = sentencepiece_model_ckpt
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCamelCase__ = self.load_vocab(filepath=SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = {self.sp_model.id_to_piece(SCREAMING_SNAKE_CASE__ ): id for id in range(self.sp_model.get_piece_size() )}
lowerCamelCase__ = {v: k for k, v in self.vocab.items()}
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if text is None:
return None
lowerCamelCase__ = self.tokenize(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = '', []
for i, ch in enumerate(SCREAMING_SNAKE_CASE__ ):
if ch in self.SP_CHAR_MAPPING:
lowerCamelCase__ = self.SP_CHAR_MAPPING.get(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = unicodedata.normalize('NFKC' , SCREAMING_SNAKE_CASE__ )
if self.is_whitespace(SCREAMING_SNAKE_CASE__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = normalized_text, [], 0
if self.do_lower_case:
lowerCamelCase__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCamelCase__ = token[1:]
lowerCamelCase__ = text[offset:].index(SCREAMING_SNAKE_CASE__ ) + offset
lowerCamelCase__ = start + len(SCREAMING_SNAKE_CASE__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCamelCase__ = end
return token_mapping
@property
def _UpperCamelCase ( self : List[str] ):
return len(self.vocab )
def _UpperCamelCase ( self : int ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : int ):
return "".join((self.SP_CHAR_MAPPING.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for c in text) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=64 , SCREAMING_SNAKE_CASE__ : str=0.1 ):
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCamelCase__ = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCamelCase__ = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCamelCase__ = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCamelCase__ = self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = self.sp_model.SampleEncodeAsPieces(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for pi, piece in enumerate(SCREAMING_SNAKE_CASE__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(SCREAMING_SNAKE_CASE__ ) and pi != 0:
new_pieces.append(SCREAMING_SNAKE_CASE__ )
continue
else:
continue
lowerCamelCase__ = 0
for i, chunk in enumerate(SCREAMING_SNAKE_CASE__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(SCREAMING_SNAKE_CASE__ ) or self.is_punct(SCREAMING_SNAKE_CASE__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCamelCase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCamelCase__ = i
if len(SCREAMING_SNAKE_CASE__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int ):
return self.vocab.get(SCREAMING_SNAKE_CASE__ , self.vocab.get(self.unk_token ) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return self.reverse_vocab.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(SCREAMING_SNAKE_CASE__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(SCREAMING_SNAKE_CASE__ ) + 1) + [1] * (len(SCREAMING_SNAKE_CASE__ ) + 3)
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = unicodedata.category(SCREAMING_SNAKE_CASE__ )
if cat == "Zs":
return True
return False
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = {}
with io.open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = line.rstrip('\n' )
lowerCamelCase__ = int(SCREAMING_SNAKE_CASE__ )
return token_to_idx
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = 0
if os.path.isdir(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCamelCase__ = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCamelCase__ = token_index
writer.write(token + '\n' )
index += 1
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'sentencepiece.bpe.model' )
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (vocab_file,)
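# Usage sketch (the class above corresponds to ErnieMTokenizer; the repo id is
# taken from the pretrained-vocab URLs above and is illustrative):
#
#   tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#   encoded = tokenizer("He was a puppeteer", return_tensors="pt")
#   # A single sequence is wrapped as [CLS] ... [SEP], per
#   # build_inputs_with_special_tokens above.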
| 659 |
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
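# Worked example (a self-contained sketch of the weighted min-max scoring
# implemented above; the variable names here are illustrative, not taken from
# the source). Weight 0 means "lower is better" (score = 1 - normalized value),
# weight 1 means "higher is better" (score = normalized value).
if __name__ == "__main__":
    alternatives = [[20.0, 4.0], [60.0, 5.0], [40.0, 5.0]]  # [price, rating]
    weights = [0, 1]  # minimize price, maximize rating
    mins = [min(col) for col in zip(*alternatives)]
    maxs = [max(col) for col in zip(*alternatives)]
    for row in alternatives:
        score = 0.0
        for value, lo, hi, w in zip(row, mins, maxs, weights):
            norm = (value - lo) / (hi - lo) if hi != lo else 0.0
            score += norm if w == 1 else 1.0 - norm
        print(row, '->', round(score, 4))
    # [40.0, 5.0] wins with 1.5 (mid price, top rating); the other two tie at 1.0.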
| 659 | 1 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = abs(_a )
lowerCamelCase__ = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = abs(_a )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case ( _a: int )-> int:
'''simple docstring'''
return sum(int(_a ) for c in str(abs(_a ) ) )
def snake_case ( )-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_a: Callable , _a: int ) -> None:
lowerCamelCase__ = F'{func.__name__}({value})'
lowerCamelCase__ = timeit(F'__main__.{call}' , setup='import __main__' )
print(F'{call:56} = {func(_a )} -- {timing:.4f} seconds' )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_a , _a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
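    # Quick cross-check (a sketch, using the de-obfuscated names referenced in
    # the benchmark loop above): the digits of 262144 sum to 2+6+2+1+4+4 = 19.
    assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19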
| 659 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def snake_case ( _a: str )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = torch.load(_a , map_location='cpu' )
if "model" in sd.keys():
        lowerCamelCase__ = sd['model']  # reuse the already-loaded dict instead of reading the file twice
# pop unnecessary weights
lowerCamelCase__ = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_a )
lowerCamelCase__ = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
lowerCamelCase__ = sd.pop(_a )
lowerCamelCase__ = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
lowerCamelCase__ = sd[key]
# We split QKV in separate Q,K,V
lowerCamelCase__ = key.replace('.qkv_proj.' , '.q_proj.' )
lowerCamelCase__ = key.replace('.qkv_proj.' , '.k_proj.' )
lowerCamelCase__ = key.replace('.qkv_proj.' , '.v_proj.' )
lowerCamelCase__ = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` (sic, the upstream class name) stores the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = torch.split(_a , depth // 3 , dim=0 )
lowerCamelCase__ = q
lowerCamelCase__ = k
lowerCamelCase__ = v
del sd[key]
return sd
@torch.no_grad()
def snake_case ( _a: Union[str, Any] , _a: str , _a: Tuple=None )-> Dict:
'''simple docstring'''
lowerCamelCase__ = load_checkpoint(_a )
if config is not None:
lowerCamelCase__ = OPTConfig.from_pretrained(_a )
else:
lowerCamelCase__ = OPTConfig()
lowerCamelCase__ = OPTModel(_a ).half().eval()
model.load_state_dict(_a )
# Check results
Path(_a ).mkdir(exist_ok=_a )
model.save_pretrained(_a )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
_snake_case = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
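# Usage sketch (hypothetical file name and paths; "facebook/opt-350m" is one
# valid --hf_config value):
#   python convert_opt_checkpoint.py \
#       --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path ./opt-converted \
#       --hf_config facebook/opt-350m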
| 659 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
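# Usage sketch (mirrors the upstream diffusers documentation for ShapEPipeline;
# checkpoint name and call signature as documented there):
#
#   import torch
#   from diffusers import ShapEPipeline
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
#   images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256).images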
| 659 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model"}
_snake_case = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
_snake_case = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
_snake_case = "▁"
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : str = PRETRAINED_VOCAB_FILES_MAP
a_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple="<s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
lowerCamelCase__ = len(self.sp_model ) - 1
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _UpperCamelCase ( self : List[str] ):
return len(self.sp_model )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__ = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
return spm_id if spm_id else self.unk_token_id
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = []
lowerCamelCase__ = ''
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __getstate__( self : str ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
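# Usage sketch (the class above corresponds to BarthezTokenizer; checkpoint
# names come from the pretrained-vocab map above):
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   encoded = tokenizer("Paris est la capitale de la France.", return_tensors="pt")
#   # Single sequences become <s> ... </s> and pairs <s> A </s></s> B </s>,
#   # per build_inputs_with_special_tokens above.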
| 659 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the reference grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the action grid
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
lowerCamelCase__ = g + heuristic[x][y] # cost from starting cell to destination cell
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
lowerCamelCase__ = False # flag set if we can't find expand
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = (DPMSolverSinglestepScheduler,)
a_ : Optional[Any] = (('num_inference_steps', 25),)
def _UpperCamelCase ( self : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCamelCase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Any=0 , **SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = dict(self.forward_default_kwargs )
lowerCamelCase__ = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.dummy_sample
lowerCamelCase__ = 0.1 * sample
lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
lowerCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase__ , lowerCamelCase__ = sample, sample
for t in range(SCREAMING_SNAKE_CASE__ , time_step + scheduler.config.solver_order + 1 ):
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCamelCase__ = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCamelCase ( self : List[Any] ):
pass
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , **SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = dict(self.forward_default_kwargs )
lowerCamelCase__ = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.dummy_sample
lowerCamelCase__ = 0.1 * sample
lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCamelCase__ = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Tuple ):
if scheduler is None:
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 10
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCamelCase__ = 50
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.25_74 ) < 1e-3
def _UpperCamelCase ( self : Tuple ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCamelCase__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCamelCase__ = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
lowerCamelCase__ = DEISMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCamelCase__ = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
def _UpperCamelCase ( self : Any ):
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , algorithm_type='dpmsolver++' , solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
assert not torch.isnan(SCREAMING_SNAKE_CASE__ ).any(), "Samples have nan numbers"
def _UpperCamelCase ( self : List[Any] ):
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _UpperCamelCase ( self : List[Any] ):
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(variance_type='learned_range' )
def _UpperCamelCase ( self : int ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ , time_step=0 )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.full_loop()
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.full_loop(use_karras_sigmas=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.22_48 ) < 1e-3
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.full_loop(prediction_type='v_prediction' )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.14_53 ) < 1e-3
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.06_49 ) < 1e-3
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0 )
lowerCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 10
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
        assert sample.dtype == torch.float16  # the half-precision sample must stay fp16 throughout
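# Usage sketch outside the test suite (the standard diffusers pattern for
# swapping this scheduler into a pipeline; checkpoint name illustrative):
#
#   from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
#   image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]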
| 659 |
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
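# Alternative sketch: every third Fibonacci number is even, and the even terms
# satisfy E(k) = 4 * E(k - 1) + E(k - 2) (2, 8, 34, 144, ...), so the odd
# terms can be skipped entirely.
def solution_even_terms_only(n: int = 4000000) -> int:
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total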
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def snake_case ( _a: Matrix , _a: Matrix )-> Matrix:
'''simple docstring'''
lowerCamelCase__ = len(_a )
lowerCamelCase__ = [[0 for _ in range(size + 1 )] for _ in range(_a )]
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for row in range(_a ):
for col in range(_a ):
lowerCamelCase__ = matrix[row][col]
lowerCamelCase__ = vector[row][0]
lowerCamelCase__ = 0
lowerCamelCase__ = 0
while row < size and col < size:
# pivoting
        lowerCamelCase__ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_a , _a ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowerCamelCase__ , lowerCamelCase__ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _a ):
lowerCamelCase__ = augmented[rowa][col] / augmented[row][col]
lowerCamelCase__ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _a ):
for row in range(_a ):
lowerCamelCase__ = augmented[row][col] / augmented[col][col]
for cola in range(_a , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_a )
]
def snake_case ( _a: list[int] )-> Callable[[int], int]:
'''simple docstring'''
lowerCamelCase__ = len(_a )
lowerCamelCase__ = [[0 for _ in range(_a )] for _ in range(_a )]
lowerCamelCase__ = [[0] for _ in range(_a )]
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for x_val, y_val in enumerate(_a ):
for col in range(_a ):
lowerCamelCase__ = (x_val + 1) ** (size - col - 1)
lowerCamelCase__ = y_val
lowerCamelCase__ = solve(_a , _a )
def interpolated_func(_a: int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_a ) )
return interpolated_func
def snake_case ( _a: int )-> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def snake_case ( _a: Callable[[int], int] = question_function , _a: int = 10 )-> int:
'''simple docstring'''
lowerCamelCase__ = [func(_a ) for x_val in range(1 , order + 1 )]
lowerCamelCase__ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowerCamelCase__ = 0
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for poly in polynomials:
lowerCamelCase__ = 1
while func(_a ) == poly(_a ):
x_val += 1
ret += poly(_a )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
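# The network above is the classic CLRS flow-network example, so the call
# should print 23. Sanity check on a tiny chain (a sketch, using the
# de-obfuscated name from the call above): flow through 0 -> 1 -> 2 is limited
# by the bottleneck edge.
print(ford_fulkerson([[0, 5, 0], [0, 0, 3], [0, 0, 0]], 0, 2))  # expected: 3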
| 659 | 1 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
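# An odd cycle can never be two-colored; a triangle is the smallest
# counterexample (a sketch, using the de-obfuscated name from the call above):
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False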
| 659 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# drop the final stop marker and everything after it
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 1 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def snake_case ( _a: bytes )-> bytes:
'''simple docstring'''
if len(_a ) != 32:
raise ValueError('Input must be of length 32' )
lowerCamelCase__ = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def snake_case ( _a: int )-> bytes:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
lowerCamelCase__ = format(_a , '08x' )[-8:]
lowerCamelCase__ = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def snake_case ( _a: bytes )-> bytes:
'''simple docstring'''
lowerCamelCase__ = b''
for char in message:
bit_string += format(_a , '08b' ).encode('utf-8' )
lowerCamelCase__ = format(len(_a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_a ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
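# Hedged worked example (not part of the original): the empty message pads to a
# single 512-bit block -- a "1" bit, then 447 "0" bits, then the 64-bit length
# field for length 0 (all zeros, appended in little-endian word order).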
def snake_case ( _a: bytes )-> Generator[list[int], None, None]:
'''simple docstring'''
if len(_a ) % 512 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_a ) , 512 ):
lowerCamelCase__ = bit_string[pos : pos + 512]
lowerCamelCase__ = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def snake_case ( _a: int )-> int:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
lowerCamelCase__ = format(_a , '032b' )
lowerCamelCase__ = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_a , 2 )
def snake_case ( _a: int , _a: int )-> int:
'''simple docstring'''
return (a + b) % 2**32
def snake_case ( _a: int , _a: int )-> int:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def snake_case ( _a: bytes )-> bytes:
'''simple docstring'''
lowerCamelCase__ = preprocess(_a )
lowerCamelCase__ = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
lowerCamelCase__ = 0x6745_2301
lowerCamelCase__ = 0xEFCD_AB89
lowerCamelCase__ = 0x98BA_DCFE
lowerCamelCase__ = 0x1032_5476
lowerCamelCase__ = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_a ):
lowerCamelCase__ = aa
lowerCamelCase__ = ba
lowerCamelCase__ = ca
lowerCamelCase__ = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
lowerCamelCase__ = d ^ (b & (c ^ d))
lowerCamelCase__ = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
lowerCamelCase__ = c ^ (d & (b ^ c))
lowerCamelCase__ = (5 * i + 1) % 16
elif i <= 47:
lowerCamelCase__ = b ^ c ^ d
lowerCamelCase__ = (3 * i + 5) % 16
else:
lowerCamelCase__ = c ^ (b | not_aa(_a ))
lowerCamelCase__ = (7 * i) % 16
lowerCamelCase__ = (f + a + added_consts[i] + block_words[g]) % 2**32
lowerCamelCase__ = d
lowerCamelCase__ = c
lowerCamelCase__ = b
lowerCamelCase__ = sum_aa(_a , left_rotate_aa(_a , shift_amounts[i] ) )
# Add hashed chunk to running total
lowerCamelCase__ = sum_aa(_a , _a )
lowerCamelCase__ = sum_aa(_a , _a )
lowerCamelCase__ = sum_aa(_a , _a )
lowerCamelCase__ = sum_aa(_a , _a )
lowerCamelCase__ = reformat_hex(_a ) + reformat_hex(_a ) + reformat_hex(_a ) + reformat_hex(_a )
return digest
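# Hedged sanity note (not part of the original): the routine above returns the
# hex digest as bytes, and the well-known MD5 of the empty message is
# b"d41d8cd98f00b204e9800998ecf8427e".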
if __name__ == "__main__":
import doctest
doctest.testmod()
| 659 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
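# Hedged format note (not part of the original): each line of the evaluation
# set holds one question, and the matching line of the gold file holds the
# tab-separated titles of that question's positive contexts.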
if __name__ == "__main__":
main()
| 659 | 1 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=sys.maxsize ):
lowerCamelCase__ = 'bilinear'
lowerCamelCase__ = max_size
lowerCamelCase__ = short_edge_length
def __call__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = []
for img in imgs:
lowerCamelCase__ , lowerCamelCase__ = img.shape[:2]
# later: provide list and randomly choose index for resize
lowerCamelCase__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowerCamelCase__ = size * 1.0 / min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if h < w:
lowerCamelCase__ , lowerCamelCase__ = size, scale * w
else:
lowerCamelCase__ , lowerCamelCase__ = scale * h, size
if max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) > self.max_size:
lowerCamelCase__ = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = newh * scale
lowerCamelCase__ = neww * scale
lowerCamelCase__ = int(neww + 0.5 )
lowerCamelCase__ = int(newh + 0.5 )
if img.dtype == np.uinta:
lowerCamelCase__ = Image.fromarray(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowerCamelCase__ = np.asarray(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
lowerCamelCase__ = nn.functional.interpolate(
SCREAMING_SNAKE_CASE__ , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE__ ).squeeze(0 )
img_augs.append(SCREAMING_SNAKE_CASE__ )
return img_augs
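# Hedged worked example (not part of the original): with short_edge_length
# fixed at 800 and max_size 1333, a 480x640 (h, w) image scales by 800/480,
# giving 800x1067 after the +0.5 rounding above.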
class _a :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowerCamelCase__ = cfg.INPUT.FORMAT
lowerCamelCase__ = cfg.SIZE_DIVISIBILITY
lowerCamelCase__ = cfg.PAD_VALUE
lowerCamelCase__ = cfg.INPUT.MAX_SIZE_TEST
lowerCamelCase__ = cfg.MODEL.DEVICE
lowerCamelCase__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCamelCase__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCamelCase__ = lambda SCREAMING_SNAKE_CASE__ : (x - self.pixel_mean) / self.pixel_std
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCamelCase__ = tuple(max(SCREAMING_SNAKE_CASE__ ) for s in zip(*[img.shape for img in images] ) )
lowerCamelCase__ = [im.shape[-2:] for im in images]
lowerCamelCase__ = [
nn.functional.pad(
SCREAMING_SNAKE_CASE__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
return torch.stack(SCREAMING_SNAKE_CASE__ ), torch.tensor(SCREAMING_SNAKE_CASE__ )
def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ):
with torch.no_grad():
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = [images]
if single_image:
assert len(SCREAMING_SNAKE_CASE__ ) == 1
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(SCREAMING_SNAKE_CASE__ , images.pop(SCREAMING_SNAKE_CASE__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
SCREAMING_SNAKE_CASE__ , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowerCamelCase__ = torch.tensor([im.shape[:2] for im in images] )
lowerCamelCase__ = self.aug(SCREAMING_SNAKE_CASE__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowerCamelCase__ = [self.normalizer(SCREAMING_SNAKE_CASE__ ) for x in images]
# now pad them to do the following operations
lowerCamelCase__ , lowerCamelCase__ = self.pad(SCREAMING_SNAKE_CASE__ )
# optional extra padding to a size-divisibility multiple (not implemented here)
if self.size_divisibility > 0:
raise NotImplementedError()
# per-image (y, x) ratios of raw size to resized size, used to rescale boxes
lowerCamelCase__ = torch.true_divide(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( _a: List[str] , _a: Any )-> Optional[int]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
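# Hedged note (not part of the original): boxes are (x0, y0, x1, y1), so the
# slice 0::2 picks the x coordinates (scaled by the width ratio in
# scale_yx[:, 1]) and 1::2 picks the y coordinates (scaled by scale_yx[:, 0]).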
def snake_case ( _a: Dict , _a: Tuple[int, int] )-> Tuple:
'''simple docstring'''
assert torch.isfinite(_a ).all(), "Box tensor contains infinite or NaN!"
lowerCamelCase__ , lowerCamelCase__ = box_size
tensor[:, 0].clamp_(min=0 , max=_a )
tensor[:, 1].clamp_(min=0 , max=_a )
tensor[:, 2].clamp_(min=0 , max=_a )
tensor[:, 3].clamp_(min=0 , max=_a )
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
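# Hedged minimal sketch of the lazy-module pattern used above (illustrative
# only, not the actual transformers implementation): attribute access triggers
# the real import, so importing the package itself stays cheap.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(self.__name__ + "." + submodule)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")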
| 659 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger()
@dataclass
class _a :
a_ : nn.Module
a_ : List[nn.Module] = field(default_factory=SCREAMING_SNAKE_CASE_ )
a_ : list = field(default_factory=SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : Tensor ):
lowerCamelCase__ = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE__ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(SCREAMING_SNAKE_CASE__ )
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(SCREAMING_SNAKE_CASE__ )
[x.remove() for x in self.handles]
return self
@property
def _UpperCamelCase ( self : Any ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda SCREAMING_SNAKE_CASE__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _a :
a_ : nn.Module
a_ : nn.Module
a_ : int = 0
a_ : List = field(default_factory=SCREAMING_SNAKE_CASE_ )
a_ : List = field(default_factory=SCREAMING_SNAKE_CASE_ )
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tensor ):
lowerCamelCase__ = Tracker(self.dest )(SCREAMING_SNAKE_CASE__ ).parametrized
lowerCamelCase__ = Tracker(self.src )(SCREAMING_SNAKE_CASE__ ).parametrized
lowerCamelCase__ = list(filter(lambda SCREAMING_SNAKE_CASE__ : type(SCREAMING_SNAKE_CASE__ ) not in self.src_skip , SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = list(filter(lambda SCREAMING_SNAKE_CASE__ : type(SCREAMING_SNAKE_CASE__ ) not in self.dest_skip , SCREAMING_SNAKE_CASE__ ) )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE__ )} operations while'
F' destination module has {len(SCREAMING_SNAKE_CASE__ )}.' )
for dest_m, src_m in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transferred from={src_m} to={dest_m}' )
def snake_case ( _a: str , _a: ResNetConfig , _a: Path , _a: bool = True )-> str:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
lowerCamelCase__ = timm.create_model(_a , pretrained=_a ).eval()
lowerCamelCase__ = ResNetForImageClassification(_a ).eval()
lowerCamelCase__ = ModuleTransfer(src=_a , dest=_a )
lowerCamelCase__ = torch.randn((1, 3, 224, 224) )
module_transfer(_a )
assert torch.allclose(from_model(_a ) , our_model(_a ).logits ), "The model logits don't match the original one."
lowerCamelCase__ = F'resnet{"-".join(name.split("resnet" ) )}'
print(_a )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_a , )
# we can use the convnext one
lowerCamelCase__ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_a , )
print(F'Pushed {checkpoint_name}' )
def snake_case ( _a: Path , _a: str = None , _a: bool = True )-> int:
'''simple docstring'''
lowerCamelCase__ = 'imagenet-1k-id2label.json'
lowerCamelCase__ = 1000
lowerCamelCase__ = (1, num_labels)
lowerCamelCase__ = 'huggingface/label-files'
lowerCamelCase__ = num_labels
lowerCamelCase__ = json.load(open(hf_hub_download(_a , _a , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ = {int(_a ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = partial(_a , num_labels=_a , idalabel=_a , labelaid=_a )
lowerCamelCase__ = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(_a , names_to_config[model_name] , _a , _a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_a , _a , _a , _a )
return config, expected_shape
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_snake_case = parser.parse_args()
_snake_case = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 659 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
| 659 | 1 |
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
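# Hedged sanity note (not part of the original): every third Fibonacci number
# is even, and for the default limit of 4,000,000 this returns 4613732 (the
# answer to Project Euler problem 2).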
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case ( _a: int = 100 )-> int:
'''simple docstring'''
lowerCamelCase__ = factorial(_a )
lowerCamelCase__ = split_and_add(_a )
return result
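# Hedged sanity note (not part of the original): 100! has digit sum 648 (the
# answer to Project Euler problem 20), so solution(100) returns 648.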
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def snake_case ( _a: list[Any] )-> None:
'''simple docstring'''
create_state_space_tree(_a , [] , 0 )
def snake_case ( _a: list[Any] , _a: list[Any] , _a: int )-> None:
'''simple docstring'''
if index == len(_a ):
print(_a )
return
create_state_space_tree(_a , _a , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_a , _a , index + 1 )
current_subsequence.pop()
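# Hedged worked example (not part of the original): for the sequence [1, 2] the
# recursion prints [], [2], [1], [1, 2] in that order -- the branch that skips
# the current element is explored before the branch that keeps it.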
if __name__ == "__main__":
_snake_case = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 659 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def snake_case ( _a: List[str] )-> Optional[MinHash]:
'''simple docstring'''
if len(_a ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
def snake_case ( _a: str )-> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
class _a :
def __init__( self : List[Any] , *, SCREAMING_SNAKE_CASE__ : float = 0.85 , ):
lowerCamelCase__ = duplication_jaccard_threshold
lowerCamelCase__ = NUM_PERM
lowerCamelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : MinHash ):
lowerCamelCase__ = self._index.query(SCREAMING_SNAKE_CASE__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(SCREAMING_SNAKE_CASE__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ = [base] + list(SCREAMING_SNAKE_CASE__ )
# reformat the cluster to be a list of dict
lowerCamelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(SCREAMING_SNAKE_CASE__ )
return duplicate_clusters
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.get_duplicate_clusters()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case ( _a: Type[Dataset] , _a: float )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case ( _a: str , _a: str )-> float:
'''simple docstring'''
lowerCamelCase__ = get_tokens(_a )
lowerCamelCase__ = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
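# Hedged worked example (not part of the original): for "def foo(a)" and
# "def foo(b)" the token sets are {def, foo, a} and {def, foo, b}, so the
# Jaccard similarity above is 2 / 4 = 0.5.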
_snake_case = None
def snake_case ( _a: Dict , _a: Union[str, Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for elementa in cluster:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(_a , _a ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase__ = 1
extremes.append(_a )
return extremes
def snake_case ( _a: Any , _a: Tuple , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
global _shared_dataset
lowerCamelCase__ = dataset
lowerCamelCase__ = []
lowerCamelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_a , _a , ) , total=len(_a ) , ):
extremes_list.append(_a )
return extremes_list
def snake_case ( _a: Type[Dataset] , _a: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowerCamelCase__ = make_duplicate_clusters(_a , _a )
lowerCamelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ = {}
lowerCamelCase__ = find_extremes(_a , _a , _a )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ = element
lowerCamelCase__ = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase__ = dataset.filter(lambda _a , _a : idx not in remove_indices , with_indices=_a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ = extreme_dict[element['base_index']]['copies']
print(F'Original dataset size: {len(_a )}' )
print(F'Number of duplicate clusters: {len(_a )}' )
print(F'Files in duplicate cluster: {len(_a )}' )
print(F'Unique files in duplicate cluster: {len(_a )}' )
print(F'Filtered dataset size: {len(_a )}' )
return ds_filter, duplicate_clusters
| 659 | 1 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_snake_case = logging.getLogger(__name__)
_snake_case = 50 # max width of layer names
_snake_case = 70 # max width of quantizer names
def snake_case ( _a: Optional[int] )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=_a , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=_a , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=_a , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=_a , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=_a , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=_a , type=_a , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=_a , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def snake_case ( _a: Tuple )-> List[str]:
'''simple docstring'''
if args.calibrator == "max":
lowerCamelCase__ = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
lowerCamelCase__ = 'histogram'
elif args.calibrator == "mse":
lowerCamelCase__ = 'histogram'
else:
raise ValueError(F'Invalid calibrator {args.calibrator}' )
lowerCamelCase__ = QuantDescriptor(num_bits=args.aprec , calib_method=_a )
lowerCamelCase__ = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_a )
quant_nn.QuantLinear.set_default_quant_desc_weight(_a )
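# Hedged note (not part of the original): axis=(0,) quantizes weights per
# output channel (one scale per row of the weight matrix), while
# --quant-per-tensor collapses this to a single scale for the whole tensor.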
def snake_case ( _a: Union[str, Any] , _a: Optional[int] , _a: int=False , _a: Optional[Any]=False )-> Union[str, Any]:
'''simple docstring'''
logger.info('Configuring Model for Quantization' )
logger.info(F'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_a , ['embeddings'] , which='weight' , _disabled=_a )
if args.quant_disable:
set_quantizer_by_name(_a , [''] , _disabled=_a )
if args.quant_disable_keyword:
set_quantizer_by_name(_a , args.quant_disable_keyword , _disabled=_a )
if args.quant_disable_layer_module:
set_quantizer_by_name(_a , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_a )
if args.quant_enable_layer_module:
set_quantizer_by_name(_a , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_a )
if args.recalibrate_weights:
recalibrate_weights(_a )
if args.fuse_qkv:
fuse_qkv(_a , _a )
if args.clip_gelu:
clip_gelu(_a , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_a )
def snake_case ( _a: Dict )-> Union[str, Any]:
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'{name:80}: {module}' )
def snake_case ( _a: List[Any] , _a: Optional[int] )-> Tuple:
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_a )
def snake_case ( _a: Tuple , _a: str )-> List[Any]:
'''simple docstring'''
def fusea(_a: Dict , _a: Any , _a: Dict ):
for mod in [qq, qk, qv]:
if not hasattr(_a , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
lowerCamelCase__ = qq._amax.detach().item()
lowerCamelCase__ = qk._amax.detach().item()
lowerCamelCase__ = qv._amax.detach().item()
lowerCamelCase__ = max(_a , _a , _a )
qq._amax.fill_(_a )
qk._amax.fill_(_a )
qv._amax.fill_(_a )
logger.info(F' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
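# Hedged rationale note (not part of the original): giving q, k and v one
# shared amax means one shared scale factor, which is what allows the three
# projections to be fused into a single quantized GEMM at deployment time.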
def snake_case ( _a: List[Any] , _a: List[Any] )-> Dict:
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
lowerCamelCase__ = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_a )
lowerCamelCase__ = mod._input_quantizer._amax.data.detach().item()
logger.info(F'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def snake_case ( _a: Optional[Any] )-> Optional[int]:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_a , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
lowerCamelCase__ = mod.weight.shape[0]
lowerCamelCase__ = mod._weight_quantizer._amax.detach()
lowerCamelCase__ = torch.ones(_a , dtype=amax.dtype , device=amax.device ) * amax
print(F'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def snake_case ( _a: Union[str, Any] )-> Dict:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_a , '_weight_quantizer' ):
if not hasattr(mod.weight_quantizer , '_amax' ):
print(F'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowerCamelCase__ = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowerCamelCase__ = set(range(len(mod.weight.size() ) ) ) - axis_set
lowerCamelCase__ = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_a , keepdims=_a ).detach()
logger.info(F'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
lowerCamelCase__ = amax
def snake_case ( _a: Optional[Any] , _a: Dict=25 , _a: Any=180 , _a: Dict=None )-> int:
'''simple docstring'''
if ignore is None:
lowerCamelCase__ = []
elif not isinstance(_a , _a ):
lowerCamelCase__ = [ignore]
lowerCamelCase__ = 0
for name, mod in model.named_modules():
if not hasattr(_a , 'weight' ):
continue
lowerCamelCase__ = max(_a , len(_a ) )
for name, mod in model.named_modules():
lowerCamelCase__ = getattr(_a , '_input_quantizer' , _a )
lowerCamelCase__ = getattr(_a , '_weight_quantizer' , _a )
if not hasattr(_a , 'weight' ):
continue
if type(_a ) in ignore:
continue
if [True for s in ignore if type(_a ) is str and s in name]:
continue
lowerCamelCase__ = F'Act:{input_q.extra_repr()}'
lowerCamelCase__ = F'Wgt:{weight_q.extra_repr()}'
lowerCamelCase__ = F'{name:{name_width}} {act_str} {wgt_str}'
if len(_a ) <= line_width:
logger.info(_a )
else:
logger.info(F'{name:{name_width}} {act_str}' )
logger.info(F'{" ":{name_width}} {wgt_str}' )
def snake_case ( _a: Union[str, Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = 0
for name, mod in model.named_modules():
if isinstance(_a , pytorch_quantization.nn.TensorQuantizer ):
print(F'{name:80} {mod}' )
count += 1
print(F'{count} TensorQuantizers found in model' )
def snake_case ( _a: str , _a: Tuple , _a: int , _a: List[str] , _a: Union[str, Any] )-> Any:
'''simple docstring'''
lowerCamelCase__ = getattr(_a , _a , _a )
if quantizer_mod is not None:
assert hasattr(_a , _a )
setattr(_a , _a , _a )
else:
logger.warning(F'{name} has no {quantizer}' )
def snake_case ( _a: Any , _a: Dict , _a: int="both" , **_a: Tuple )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = F'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
if which in ["input", "both"]:
set_quantizer(_a , _a , '_input_quantizer' , _a , _a )
if which in ["weight", "both"]:
set_quantizer(_a , _a , '_weight_quantizer' , _a , _a )
logger.info(_a )
def snake_case ( _a: Any , _a: str , **_a: Optional[Any] )-> List[Any]:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_a , '_input_quantizer' ) or hasattr(_a , '_weight_quantizer' ):
for n in names:
if re.search(_a , _a ):
set_quantizers(_a , _a , **_a )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(_a , _a ):
lowerCamelCase__ = F'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
setattr(_a , _a , _a )
logger.info(_a )
| 659 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def snake_case ( _a: Any )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = test_results.split(' ' )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
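# Hedged worked example (not part of the original): for a pytest summary such
# as "= 2 failed, 30 passed in 12.34s =", this returns (2, 30, "12.34s").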
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = None
lowerCamelCase__ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _a ):
lowerCamelCase__ = True
lowerCamelCase__ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ = line
lowerCamelCase__ = False
return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [self._time_spent]
lowerCamelCase__ = 0
for time in time_spent:
lowerCamelCase__ = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ = F'*Num failures* :{len(job_result["failed"] )} \n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 659 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
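# Hedged usage sketch for the fast tokenizer class above. Assumes the public
# "facebook/nllb-200-distilled-600M" checkpoint is reachable; the language
# codes come from the FAIRSEQ list near the top of this file.
if __name__ == "__main__":
    from transformers import NllbTokenizerFast

    tok = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    enc = tok("UN Chief says there is no military solution in Syria")
    # set_src_lang_special_tokens prepends the source language code (or appends
    # it after </s> in legacy mode), so "eng_Latn" should appear up front.
    print(tok.convert_ids_to_tokens(enc["input_ids"])[:3])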
| 659 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
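# Minimal sketch of the classifier-free-guidance update inside the denoising
# loop above, on dummy tensors; the guidance scale and shapes are illustrative
# assumptions, not values taken from this pipeline.
if __name__ == "__main__":
    import torch

    guidance_scale = 4.0
    noise_pred_uncond = torch.zeros(1, 16, 32)  # prediction from the zeroed image embedding
    noise_pred_cond = torch.ones(1, 16, 32)  # prediction from the real image embedding
    guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
    assert torch.allclose(guided, torch.full((1, 16, 32), 4.0))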
| 659 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class _a ( unittest.TestCase ):
a_ : Dict = StableDiffusionLDMaDPipeline
a_ : int = TEXT_TO_IMAGE_PARAMS
a_ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a_ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _UpperCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCamelCase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionLDMaDPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb[0, -3:, -3:, -1]
lowerCamelCase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCamelCase__ = np.array(
[0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] )
lowerCamelCase__ = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionLDMaDPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 3 * [inputs['prompt']]
# forward
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb_slice_a[0, -3:, -3:, -1]
lowerCamelCase__ = depth_slice_a[0, -3:, -1]
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 3 * [inputs.pop('prompt' )]
lowerCamelCase__ = ldmad_pipe.tokenizer(
SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , )
lowerCamelCase__ = text_inputs['input_ids'].to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe.text_encoder(SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = prompt_embeds
# forward
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb_slice_a[0, -3:, -3:, -1]
lowerCamelCase__ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = StableDiffusionLDMaDPipeline(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'french fries'
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ , negative_prompt=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb[0, -3:, -3:, -1]
lowerCamelCase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCamelCase__ = np.array(
[0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] )
lowerCamelCase__ = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]="cpu" , SCREAMING_SNAKE_CASE__ : int=torch.floataa , SCREAMING_SNAKE_CASE__ : Tuple=0 ):
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE__ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb[0, -3:, -3:, -1].flatten()
lowerCamelCase__ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
lowerCamelCase__ = np.array(
[0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] )
lowerCamelCase__ = np.array(
[0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]="cpu" , SCREAMING_SNAKE_CASE__ : Dict=torch.floataa , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ):
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE__ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = 0.49_55_86
lowerCamelCase__ = 0.33_79_55_15
lowerCamelCase__ = 1_12.4_85_18
lowerCamelCase__ = 98.48_97_46
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = 0.4_19_41_27
lowerCamelCase__ = 0.35_37_55_86
lowerCamelCase__ = 0.5_63_85_02
lowerCamelCase__ = 0.34_68_61_03
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
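# Hedged end-to-end sketch mirroring the slow tests above. Assumes the
# "Intel/ldm3d" checkpoint, a CUDA device, and the pipeline class exactly as
# imported at the top of this test file.
if __name__ == "__main__":
    pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to("cuda")
    out = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
    rgb, depth = out.rgb, out.depth  # an RGB image plus an aligned depth map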
| 659 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 659 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
_snake_case = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
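# Sketch of what the lazy structure above gives the consumer: attribute access
# on the package triggers the real import, and the torch-backed classes are
# only pulled in when torch is available. Assumes a standard transformers
# install exposing the CLAP symbols.
if __name__ == "__main__":
    import transformers
    from transformers.utils import is_torch_available

    _ = transformers.ClapProcessor  # resolved on first access via _LazyModule
    if is_torch_available():
        _ = transformers.ClapModel  # heavy torch-backed class, also loaded lazily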
| 659 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
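# Hedged standalone sketch of the model check above on tiny dummy inputs; the
# config values mirror the tester defaults and assume TensorFlow is installed
# so the conditional imports at the top of this file succeed.
if __name__ == "__main__":
    config = BlipTextConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = TFBlipTextModel(config=config)
    dummy_ids = ids_tensor([2, 7], vocab_size=99)
    out = model(dummy_ids, training=False)
    print(out.last_hidden_state.shape)  # expected: (2, 7, 32)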
| 659 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def snake_case ( _a: int = 8 )-> str:
'''simple docstring'''
lowerCamelCase__ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_a ) for _ in range(_a ) )
def snake_case ( _a: str , _a: int )-> str:
'''simple docstring'''
i -= len(_a )
lowerCamelCase__ = i // 3
lowerCamelCase__ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCamelCase__ = (
chars_incl
+ random(_a , quotient + remainder )
+ random(_a , _a )
+ random(_a , _a )
)
lowerCamelCase__ = list(_a )
shuffle(_a )
return "".join(_a )
# random is a generalised function for letters, characters and numbers
def snake_case ( _a: str , _a: int )-> str:
'''simple docstring'''
return "".join(secrets.choice(_a ) for _ in range(_a ) )
def snake_case ( _a: Dict , _a: Any )-> Any:
'''simple docstring'''
pass # Put your code here...
def snake_case ( _a: str , _a: int )-> Optional[Any]:
'''simple docstring'''
pass # Put your code here...
def snake_case ( _a: Any , _a: str )-> str:
'''simple docstring'''
pass # Put your code here...
def snake_case ( _a: str , _a: int = 8 )-> bool:
'''simple docstring'''
if len(_a ) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCamelCase__ = any(char in ascii_uppercase for char in password )
lowerCamelCase__ = any(char in ascii_lowercase for char in password )
lowerCamelCase__ = any(char in digits for char in password )
lowerCamelCase__ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def snake_case ( )-> Dict:
'''simple docstring'''
lowerCamelCase__ = int(input('Please indicate the max length of your password: ' ).strip() )
lowerCamelCase__ = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(_a ) )
print(
'Alternative Password generated:' , alternative_password_generator(_a , _a ) , )
    print('[If you are thinking of using this password, you better save it.]' )
if __name__ == "__main__":
main()
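# Hedged, self-contained restatement of the strength check above: the style
# transform collapses every helper to the same name (and duplicates argument
# names), so this sketch uses assumed readable names instead of the originals.
from string import ascii_lowercase, ascii_uppercase, digits, punctuation

def is_strong_password_sketch(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        return False
    return (
        any(c in ascii_uppercase for c in password)
        and any(c in ascii_lowercase for c in password)
        and any(c in digits for c in password)
        and any(c in punctuation for c in password)
    )

assert is_strong_password_sketch("Hunter2!xyz")
assert not is_strong_password_sketch("short")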
| 659 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
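# The supported import path named by the warning above:
from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401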
| 659 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = MobileBertTokenizer
a_ : Any = MobileBertTokenizerFast
a_ : Union[str, Any] = True
a_ : List[Any] = True
a_ : str = filter_non_english
a_ : int = 'google/mobilebert-uncased'
def _UpperCamelCase ( self : Dict ):
super().setUp()
lowerCamelCase__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCamelCase__ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = 'UNwant\u00E9d,running'
lowerCamelCase__ = 'unwanted, running'
return input_text, output_text
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.tokenizer_class(self.vocab_file )
lowerCamelCase__ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [9, 6, 7, 12, 10, 11] )
def _UpperCamelCase ( self : Dict ):
if not self.test_rust_tokenizer:
return
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_rust_tokenizer()
lowerCamelCase__ = 'UNwant\u00E9d,running'
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_rust_tokenizer()
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# With lower casing
lowerCamelCase__ = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'UNwant\u00E9d,running'
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_rust_tokenizer()
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase__ = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = i
lowerCamelCase__ = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _UpperCamelCase ( self : Union[str, Any] ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _UpperCamelCase ( self : Dict ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _UpperCamelCase ( self : int ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
lowerCamelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def _UpperCamelCase ( self : int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowerCamelCase__ = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE__ , 'do_lower_case' ) else False
lowerCamelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_common_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_common_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
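# --- Illustrative sketch, not part of the original test file ---
# The sequence-builder test above relies on BERT-style special-token layout.
# With hypothetical ids (101 = [CLS], 102 = [SEP]; the text ids below are
# assumptions), the layout arithmetic is:
def _demo_special_token_layout():
    text = [7592, 2088]  # assumed ids for "sequence builders"
    text_2 = [4800, 2357]  # assumed ids for "multi-sequence build"
    single = [101] + text + [102]  # [CLS] A [SEP]
    pair = [101] + text + [102] + text_2 + [102]  # [CLS] A [SEP] B [SEP]
    assert single[0] == 101 and single[-1] == 102
    assert pair.count(102) == 2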
| 659 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
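# --- Illustrative sketch, not part of the original module ---
# Funnel differs from BERT in that the [CLS] position gets its own token type id
# (cls_token_type_id = 2 above). For a pair (A, B) the expected layout is
# [2] + [0]*(len(A) + 1) + [1]*(len(B) + 1), one [SEP] after each segment.
# A minimal pure-Python check of that arithmetic (lengths are hypothetical):
def _demo_funnel_token_type_ids(len_a: int = 3, len_b: int = 2) -> list:
    cls_type, sep = 2, 1
    return [cls_type] + [0] * (len_a + sep) + [1] * (len_b + sep)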
| 659 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=True, metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        }, )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    language: str = field(
        default=None, metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
    train_language: Optional[str] = field(
        default=None, metadata={'help': 'Train language if it is different from the evaluation language.'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'}, )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_xnli', model_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli', model_args.language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                'xnli', model_args.train_language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features['label'].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli', model_args.language, split='validation', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features['label'].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli', model_args.language, split='test', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features['label'].names
    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task='xnli', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples['premise'], examples['hypothesis'], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on train dataset', )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(F'Sample {index} of the training set: {train_dataset[index]}.')
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on validation dataset', )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on prediction dataset', )
    # Get the metric function
    metric = evaluate.load('xnli')
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # Prediction
    if training_args.do_predict:
        logger.info('*** Predict ***')
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix='predict')
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(F'{index}\t{item}\n')
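def _mp_fn(index):
    # For xla_spawn (TPUs). This entry point is present in the upstream HF
    # example script; `index` is the process index injected by torch_xla's
    # spawner and is intentionally unused.
    main()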
if __name__ == "__main__":
main()
| 659 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v: int, c: int):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
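# --- Illustrative alternative, not part of the original file ---
# The same bipartiteness check can be done iteratively with BFS, colouring each
# newly reached vertex with the opposite colour of its parent and failing as
# soon as two neighbours share a colour:
from collections import deque

def check_bipartite_bfs(graph: dict) -> bool:
    color: dict = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if u not in color:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True

print(check_bipartite_bfs(graph))  # same verdict as the DFS version above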
| 659 | 1 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    '''simple docstring'''
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    '''simple docstring'''
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F'Invalid weight of {weight:f} provided'
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    '''simple docstring'''
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    '''simple docstring'''
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
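# --- Illustrative usage, not part of the original file ---
# A weight of 0 means "lower is better" and 1 means "higher is better". For
# three vehicles scored on (price, mileage, registration year):
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))
    # each row gains a combined score appended at the end, e.g. [20, 60, 2012, 2.0]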
| 659 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)
    def __bool__(self):
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        return self._len
    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        val_string = ' ,'.join(
            F'{item.key}: {item.val}' for item in self._buckets if item)
        return F'HashMap({val_string})'
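# --- Illustrative usage, not part of the original file ---
# Open addressing with linear probing: colliding keys walk forward bucket by
# bucket, and tombstones (_deleted) keep probe chains intact after deletion.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4)
    hm["a"] = 1
    hm["b"] = 2
    del hm["a"]  # leaves a tombstone so "b" stays reachable on its probe chain
    assert "a" not in list(hm) and hm["b"] == 2
    print(hm)  # HashMap(b: 2)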
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    '''simple docstring'''
    url = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    titles = soup.find_all('td', attrs='titleColumn')
    ratings = soup.find_all('td', class_='ratingColumn imdbRating')
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    '''simple docstring'''
    movies = get_imdb_top_250_movies()
    with open(filename, 'w', newline='') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['Movie title', 'IMDb rating'])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
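# --- Illustrative hardening, not part of the original file ---
# IMDb sometimes serves a different page to clients without a browser-like
# User-Agent, which would leave the selectors above empty. A hedged variant
# passes explicit headers (the header value is an assumption, adjust as needed):
#
#   response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
#   soup = BeautifulSoup(response.text, "html.parser")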
| 659 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
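    # --- Illustrative cross-check, not part of the original file ---
    # The plain recursion is exponential, while the memoised and bottom-up
    # variants both run in O(target * n); all three must agree. For
    # array=[1, 2, 5] and target=5 the count of ordered combinations is 9:
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )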
| 659 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(R' (\[(MASK|SEP)\])', R'\1', ' '.join(sub_texts))
        else:
            text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
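# --- Illustrative sketch, not part of the original module ---
# The three sequence-pair helpers above agree on the layout
#   [CLS] A [SEP] B [SEP]
# A pure-Python check of the lengths involved (the 65/66 CLS/SEP ids and the
# segment lengths are assumptions for illustration only):
def _demo_bigbird_pair_layout(len_a: int = 4, len_b: int = 3) -> None:
    ids = [65] + list(range(len_a)) + [66] + list(range(len_b)) + [66]
    special_mask = [1] + [0] * len_a + [1] + [0] * len_b + [1]
    type_ids = [0] * (len_a + 2) + [1] * (len_b + 1)
    assert len(ids) == len(special_mask) == len(type_ids)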
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs)['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFRoFormerModel,
            'fill-mask': TFRoFormerForMaskedLM,
            'question-answering': TFRoFormerForQuestionAnswering,
            'text-classification': TFRoFormerForSequenceClassification,
            'text-generation': TFRoFormerForCausalLM,
            'token-classification': TFRoFormerForTokenClassification,
            'zero-shot': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # NOTE: `pipeline_test_casse_name` keeps the upstream spelling used by the pipeline test mixin.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        desired_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ])
        desired_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ])
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key, atol=self.tolerance)
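# --- Illustrative sketch, not part of the original test file ---
# apply_rotary_position_embeddings rotates each (even, odd) feature pair by a
# position-dependent angle. A minimal NumPy rendition of that rotation; the
# interleaved sin/cos layout below is an assumption for illustration and may
# differ in detail from the framework implementation:
import numpy as np

def rotate_pairs(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x_even * cos - x_odd * sin  # rotated even components
    out[..., 1::2] = x_odd * cos + x_even * sin  # rotated odd components
    return out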
| 659 |
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
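# Example (illustrative; the top-level entry point's original name is not
# preserved in this dump): for
# source = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
# weights = [0, 0, 1] # 0: lower raw value scores higher, 1: higher scores higher
# the pipeline above min-max normalizes each column, sums the per-column
# scores, and appends the combined score to each source row.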
| 659 | 1 |
"""simple docstring"""
from manim import *
class _a ( SCREAMING_SNAKE_CASE_ ):
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
lowerCamelCase__ = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
lowerCamelCase__ = Text('CPU' , font_size=24 )
lowerCamelCase__ = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [mem.copy() for i in range(4 )]
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
lowerCamelCase__ = Text('GPU' , font_size=24 )
lowerCamelCase__ = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
lowerCamelCase__ = Text('Model' , font_size=24 )
lowerCamelCase__ = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = fill.copy().set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.8 )
target.move_to(SCREAMING_SNAKE_CASE__ )
model_arr.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(SCREAMING_SNAKE_CASE__ )
self.add(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase__ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
lowerCamelCase__ = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
lowerCamelCase__ = Text('Disk' , font_size=24 )
lowerCamelCase__ = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
disk.move_to([-4, -1.25, 0] )
self.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = Square(0.3 )
input.set_fill(SCREAMING_SNAKE_CASE__ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , SCREAMING_SNAKE_CASE__ , buff=0.5 )
self.play(Write(SCREAMING_SNAKE_CASE__ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=SCREAMING_SNAKE_CASE__ , buff=0.02 )
self.play(MoveToTarget(SCREAMING_SNAKE_CASE__ ) )
self.play(FadeOut(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = Arrow(start=SCREAMING_SNAKE_CASE__ , end=SCREAMING_SNAKE_CASE__ , color=SCREAMING_SNAKE_CASE__ , buff=0.5 )
a.next_to(model_arr[0].get_left() , SCREAMING_SNAKE_CASE__ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) )
lowerCamelCase__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(SCREAMING_SNAKE_CASE__ ) , Circumscribe(model_arr[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(model_cpu_arr[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , SCREAMING_SNAKE_CASE__ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase__ = AnimationGroup(
FadeOut(SCREAMING_SNAKE_CASE__ , run_time=0.5 ) , MoveToTarget(SCREAMING_SNAKE_CASE__ , run_time=0.5 ) , FadeIn(SCREAMING_SNAKE_CASE__ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(SCREAMING_SNAKE_CASE__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase__ = 0.7
self.play(
Circumscribe(model_arr[i] , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(cpu_left_col_base[i] , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(model_arr[i + 1] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(cpu_left_col_base[-1] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase__ = a_c
lowerCamelCase__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(SCREAMING_SNAKE_CASE__ ) , FadeOut(SCREAMING_SNAKE_CASE__ , run_time=0.5 ) , )
lowerCamelCase__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) , MoveToTarget(SCREAMING_SNAKE_CASE__ ) )
self.wait()
| 659 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate; it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
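# Example (illustrative): with the default seed=2 and step=1, pollard_rho(8051)
# finds the factor 97 (8051 = 83 * 97); a different seed or step may find the
# cofactor 83 instead, or fail within the attempt budget.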
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def snake_case ( _a: Sequence[int] | None = None )-> int:
'''simple docstring'''
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
lowerCamelCase__ = nums[0]
for i in range(1 , len(_a ) ):
lowerCamelCase__ = nums[i]
lowerCamelCase__ = max(_a , ans + num , _a )
return ans
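# Example (illustrative): max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
# returns 6, the sum of the contiguous subarray [4, -1, 2, 1].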
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_snake_case = int(input("Enter number of elements : ").strip())
_snake_case = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 659 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_snake_case = 0
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_snake_case = tuple[int, int]
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Node | None , ):
lowerCamelCase__ = pos_x
lowerCamelCase__ = pos_y
lowerCamelCase__ = (pos_y, pos_x)
lowerCamelCase__ = goal_x
lowerCamelCase__ = goal_y
lowerCamelCase__ = g_cost
lowerCamelCase__ = parent
lowerCamelCase__ = self.calculate_heuristic()
lowerCamelCase__ = self.g_cost + self.h_cost
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.pos_x - self.goal_x
lowerCamelCase__ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(SCREAMING_SNAKE_CASE__ ) + abs(SCREAMING_SNAKE_CASE__ )
else:
return sqrt(dy**2 + dx**2 )
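# (Illustrative note) On this 4-connected grid with unit step cost, the
# Manhattan distance above is the tighter admissible heuristic; the Euclidean
# distance is admissible too but less informed for axis-aligned moves.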
def __lt__( self : str , SCREAMING_SNAKE_CASE__ : Node ):
return self.f_cost < other.f_cost
class _a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : TPosition , SCREAMING_SNAKE_CASE__ : TPosition ):
lowerCamelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [self.start]
lowerCamelCase__ = []
lowerCamelCase__ = False
def _UpperCamelCase ( self : Dict ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCamelCase__ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(SCREAMING_SNAKE_CASE__ )
self.closed_nodes.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_successors(SCREAMING_SNAKE_CASE__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
# retrieve the best current path
lowerCamelCase__ = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
self.open_nodes.append(SCREAMING_SNAKE_CASE__ )
return [self.start.pos]
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Node ):
lowerCamelCase__ = []
for action in delta:
lowerCamelCase__ = parent.pos_x + action[1]
lowerCamelCase__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE__ , ) )
return successors
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Node | None ):
lowerCamelCase__ = node
lowerCamelCase__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase__ = current_node.parent
path.reverse()
return path
class _a :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : TPosition , SCREAMING_SNAKE_CASE__ : TPosition ):
lowerCamelCase__ = AStar(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = AStar(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = False
def _UpperCamelCase ( self : Dict ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowerCamelCase__ = self.fwd_astar.open_nodes.pop(0 )
lowerCamelCase__ = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.fwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE__ )
self.bwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = current_bwd_node
lowerCamelCase__ = current_fwd_node
lowerCamelCase__ = {
self.fwd_astar: self.fwd_astar.get_successors(SCREAMING_SNAKE_CASE__ ),
self.bwd_astar: self.bwd_astar.get_successors(SCREAMING_SNAKE_CASE__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
# retrieve the best current path
lowerCamelCase__ = astar.open_nodes.pop(
astar.open_nodes.index(SCREAMING_SNAKE_CASE__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(SCREAMING_SNAKE_CASE__ )
else:
astar.open_nodes.append(SCREAMING_SNAKE_CASE__ )
return [self.fwd_astar.start.pos]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Node , SCREAMING_SNAKE_CASE__ : Node ):
lowerCamelCase__ = self.fwd_astar.retrace_path(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.bwd_astar.retrace_path(SCREAMING_SNAKE_CASE__ )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case = time.time()
_snake_case = AStar(init, goal)
_snake_case = a_star.search()
_snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_snake_case = time.time()
_snake_case = BidirectionalAStar(init, goal)
_snake_case = bidir_astar.search()  # restore the search call so the timing below measures real work (the variable name `bidir_astar` is assumed)
_snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 659 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the reference grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the action grid
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
lowerCamelCase__ = g + heuristic[x][y] # cost from starting cell to destination cell
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
lowerCamelCase__ = False # flag set if we can't find expand
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
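# Sketch of the bookkeeping (illustrative): action[x][y] stores the index into
# DIRECTIONS of the move used to enter cell (x, y); walking those indices
# backwards from the goal and reversing yields the forward path returned above.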
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
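# (Illustrative note) The try/except probes above only record which optional
# backends are installed; _LazyModule then defers the real submodule imports
# until an attribute is first accessed, keeping the package's top-level import
# cheap.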
| 659 |
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
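# Example (well-known Project Euler #2 result): with the default limit of
# 4,000,000 the even-valued Fibonacci terms sum to 4,613,732.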
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class _a ( unittest.TestCase ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=13 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : Optional[int]=32 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : Dict=4 , SCREAMING_SNAKE_CASE__ : Any=37 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Dict=5_12 , SCREAMING_SNAKE_CASE__ : str=16 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : Any=4 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_attention_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_choices
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_attention_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Any = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = FlaxAlbertModelTester(self )
@slow
def _UpperCamelCase ( self : int ):
for model_class_name in self.all_model_classes:
lowerCamelCase__ = model_class_name.from_pretrained('albert-base-v2' )
lowerCamelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_flax
class _a ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowerCamelCase__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
lowerCamelCase__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = (1, 11, 7_68)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 659 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
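# For the capacity matrix below (the classic CLRS flow network), the maximum
# flow from source 0 to sink 5 is 23.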
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
| 659 | 1 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_snake_case = logging.get_logger(__name__)
def snake_case ( _a: Union[str, Any] , _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = nn.functional.normalize(_a )
lowerCamelCase__ = nn.functional.normalize(_a )
return torch.mm(_a , normalized_text_embeds.t() )
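# (Illustrative note) Since both embedding matrices are L2-normalized first,
# the matrix product above equals the pairwise cosine similarity between image
# embeddings and concept embeddings.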
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = CLIPConfig
a_ : Tuple = ['CLIPEncoderLayer']
def __init__( self : str , SCREAMING_SNAKE_CASE__ : CLIPConfig ):
super().__init__(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = CLIPVisionModel(config.vision_config )
lowerCamelCase__ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = nn.Parameter(torch.ones(17 ) , requires_grad=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = nn.Parameter(torch.ones(3 ) , requires_grad=SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.vision_model(SCREAMING_SNAKE_CASE__ )[1] # pooled_output
lowerCamelCase__ = self.visual_projection(SCREAMING_SNAKE_CASE__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase__ = cosine_distance(SCREAMING_SNAKE_CASE__ , self.special_care_embeds ).cpu().float().numpy()
lowerCamelCase__ = cosine_distance(SCREAMING_SNAKE_CASE__ , self.concept_embeds ).cpu().float().numpy()
lowerCamelCase__ = []
lowerCamelCase__ = image_embeds.shape[0]
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase__ = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
lowerCamelCase__ = special_cos_dist[i][concept_idx]
lowerCamelCase__ = self.special_care_embeds_weights[concept_idx].item()
lowerCamelCase__ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
lowerCamelCase__ = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
lowerCamelCase__ = cos_dist[i][concept_idx]
lowerCamelCase__ = self.concept_embeds_weights[concept_idx].item()
lowerCamelCase__ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(SCREAMING_SNAKE_CASE__ )
result.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : torch.FloatTensor ):
lowerCamelCase__ = self.vision_model(SCREAMING_SNAKE_CASE__ )[1] # pooled_output
lowerCamelCase__ = self.visual_projection(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = cosine_distance(SCREAMING_SNAKE_CASE__ , self.special_care_embeds )
lowerCamelCase__ = cosine_distance(SCREAMING_SNAKE_CASE__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase__ = 0.0
lowerCamelCase__ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCamelCase__ = torch.any(special_scores > 0 , dim=1 )
lowerCamelCase__ = special_care * 0.01
lowerCamelCase__ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
lowerCamelCase__ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCamelCase__ = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 659 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
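# (Illustrative) After main() runs, each line of `evaluation_set` holds one
# question and the matching line of `gold_data_path` holds the tab-separated
# titles of that question's positive contexts.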
if __name__ == "__main__":
main()
| 659 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_snake_case = logging.get_logger(__name__)
# General docstring
_snake_case = "RegNetConfig"
# Base docstring
_snake_case = "facebook/regnet-y-040"
_snake_case = [1, 1088, 7, 7]
# Image classification docstring
_snake_case = "facebook/regnet-y-040"
_snake_case = "tabby, tabby cat"
_snake_case = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _a ( tf.keras.layers.Layer ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCamelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCamelCase__ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , strides=SCREAMING_SNAKE_CASE__ , padding='VALID' , groups=SCREAMING_SNAKE_CASE__ , use_bias=SCREAMING_SNAKE_CASE__ , name='convolution' , )
lowerCamelCase__ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
lowerCamelCase__ = ACTaFN[activation] if activation is not None else tf.identity
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = self.convolution(self.padding(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = self.normalization(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _a ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : RegNetConfig , **SCREAMING_SNAKE_CASE__ : List[Any] ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = config.num_channels
lowerCamelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ):
lowerCamelCase__ = shape_list(SCREAMING_SNAKE_CASE__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCamelCase__ = tf.transpose(SCREAMING_SNAKE_CASE__ , perm=(0, 2, 3, 1) )
lowerCamelCase__ = self.embedder(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _a ( tf.keras.layers.Layer ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 , **SCREAMING_SNAKE_CASE__ : str ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE__ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE__ , use_bias=SCREAMING_SNAKE_CASE__ , name='convolution' )
lowerCamelCase__ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : tf.Tensor , SCREAMING_SNAKE_CASE__ : bool = False ):
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE__ ) , training=SCREAMING_SNAKE_CASE__ )
class _a ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Tuple ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ , name='pooler' )
lowerCamelCase__ = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowerCamelCase__ = self.pooler(SCREAMING_SNAKE_CASE__ )
for layer_module in self.attention:
lowerCamelCase__ = layer_module(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = hidden_state * pooled
return hidden_state
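# (Illustrative note) TFRegNetSELayer above is a squeeze-and-excitation block:
# global average pooling squeezes spatial information into per-channel
# statistics, and the two 1x1 convolutions compute per-channel sigmoid gates
# that rescale the input feature map.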
class _a ( tf.keras.layers.Layer ):
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : RegNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 , **SCREAMING_SNAKE_CASE__ : Dict ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = in_channels != out_channels or stride != 1
lowerCamelCase__ = max(1 , out_channels // config.groups_width )
lowerCamelCase__ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCamelCase__ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , groups=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ , name='layer.2' ),
]
lowerCamelCase__ = ACTaFN[config.hidden_act]
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = hidden_state
for layer_module in self.layers:
lowerCamelCase__ = layer_module(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
lowerCamelCase__ = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _a ( tf.keras.layers.Layer ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : RegNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 , **SCREAMING_SNAKE_CASE__ : Any ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = in_channels != out_channels or stride != 1
lowerCamelCase__ = max(1 , out_channels // config.groups_width )
lowerCamelCase__ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
lowerCamelCase__ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , groups=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ , name='layer.3' ),
]
lowerCamelCase__ = ACTaFN[config.hidden_act]
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = hidden_state
for layer_module in self.layers:
lowerCamelCase__ = layer_module(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
lowerCamelCase__ = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _a ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : RegNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
lowerCamelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , name='layers.0' ),
*[layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
for layer_module in self.layers:
lowerCamelCase__ = layer_module(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _a ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : RegNetConfig , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
lowerCamelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , depth=SCREAMING_SNAKE_CASE__ , name=F'stages.{i+1}' ) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : tf.Tensor , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True ):
lowerCamelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCamelCase__ = hidden_states + (hidden_state,)
lowerCamelCase__ = stage_module(SCREAMING_SNAKE_CASE__ )
if output_hidden_states:
lowerCamelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
@keras_serializable
class _a ( tf.keras.layers.Layer ):
a_ : Optional[int] = RegNetConfig
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Dict ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = config
lowerCamelCase__ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE__ , name='embedder' )
lowerCamelCase__ = TFRegNetEncoder(SCREAMING_SNAKE_CASE__ , name='encoder' )
lowerCamelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ , name='pooler' )
@unpack_inputs
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : tf.Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : bool = False , ):
lowerCamelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ = self.embedder(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = encoder_outputs[0]
lowerCamelCase__ = self.pooler(SCREAMING_SNAKE_CASE__ )
        # Change to NCHW output format to have uniformity in the modules
lowerCamelCase__ = tf.transpose(SCREAMING_SNAKE_CASE__ , perm=(0, 3, 1, 2) )
lowerCamelCase__ = tf.transpose(SCREAMING_SNAKE_CASE__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCamelCase__ = tuple([tf.transpose(SCREAMING_SNAKE_CASE__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , pooler_output=SCREAMING_SNAKE_CASE__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[Any] = RegNetConfig
a_ : Optional[int] = 'regnet'
a_ : str = 'pixel_values'
@property
def _UpperCamelCase ( self : Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
_snake_case = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE_ , )
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : RegNetConfig , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ):
super().__init__(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : tf.Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Any=False , ):
lowerCamelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE_ , )
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : RegNetConfig , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[Any] ):
super().__init__(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = config.num_labels
lowerCamelCase__ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ , name='regnet' )
# classification head
lowerCamelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : tf.Tensor = None , SCREAMING_SNAKE_CASE__ : tf.Tensor = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : List[str]=False , ):
lowerCamelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ = self.regnet(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase__ = self.classifier[0](SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.classifier[1](SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ )
if not return_dict:
lowerCamelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
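# Layout note (added aside, not part of the original module): the Keras layers
# above compute in NHWC, so the two tf.transpose(..., perm=(0, 3, 1, 2)) calls
# in the main layer convert outputs to the NCHW layout of the PyTorch RegNet,
# e.g. a (2, 7, 7, 768) feature map becomes (2, 768, 7, 7).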
| 659 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
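# Behavior note (added aside, not part of the original module): with this
# pattern, importing the package is cheap because `_LazyModule` resolves a
# submodule only when one of the names listed in `_import_structure` is first
# accessed, e.g. `from transformers import BlenderbotConfig` triggers the real
# import of `configuration_blenderbot` at attribute-access time.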
| 659 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _a ( unittest.TestCase ):
@slow
    def test_for_image_classification( self ):
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('nielsr/rvlcdip-demo' )
        image = dataset['train'][0]['image'].convert('RGB' )
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
| 659 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = 'swinv2'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
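# Arithmetic sketch (added, not part of the original file): with the default
# embed_dim=96 and four stages (depths=[2, 2, 6, 2]), the channels double at
# each of the three stage transitions, so the derived hidden_size is
# 96 * 2 ** 3 = 768:
#
#     >>> int(96 * 2 ** (len([2, 2, 6, 2]) - 1))
#     768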
| 659 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 659 |
"""simple docstring"""
def factorial(num: int) -> int:
    '''simple docstring'''
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    '''simple docstring'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    '''simple docstring'''
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
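# Sanity check (added; not part of the original solution): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so the two helpers above should compose to 27.
assert solution(10) == 27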
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 659 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a :
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Dict=4 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : str=99 , SCREAMING_SNAKE_CASE__ : Any=36 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_12 , SCREAMING_SNAKE_CASE__ : List[Any]=16 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=10_00 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = coordinate_size
lowerCamelCase__ = shape_size
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase__ = text_seq_length
lowerCamelCase__ = (image_size // patch_size) ** 2 + 1
lowerCamelCase__ = self.text_seq_length + self.image_seq_length
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
lowerCamelCase__ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ = bbox[i, j, 3]
lowerCamelCase__ = bbox[i, j, 1]
lowerCamelCase__ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ = bbox[i, j, 2]
lowerCamelCase__ = bbox[i, j, 0]
lowerCamelCase__ = tmp_coordinate
lowerCamelCase__ = tf.constant(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = TFLayoutLMvaModel(config=SCREAMING_SNAKE_CASE__ )
# text + image
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase__ = model({'pixel_values': pixel_values} , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFLayoutLMvaForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFLayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = 2
lowerCamelCase__ = TFLayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[int] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
a_ : Union[str, Any] = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
a_ : Optional[Any] = False
a_ : List[Any] = False
a_ : str = False
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return True
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=False ):
lowerCamelCase__ = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = {
k: tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(SCREAMING_SNAKE_CASE__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
lowerCamelCase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = TFLayoutLMvaModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
if getattr(SCREAMING_SNAKE_CASE__ , 'hf_compute_loss' , SCREAMING_SNAKE_CASE__ ):
# The number of elements in the loss should be the same as the number of elements in the label
lowerCamelCase__ = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=SCREAMING_SNAKE_CASE__ )[0]
]
lowerCamelCase__ = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowerCamelCase__ = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = prepared_for_class.pop('input_ids' )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowerCamelCase__ = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
lowerCamelCase__ = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowerCamelCase__ = -1_00
lowerCamelCase__ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowerCamelCase__ = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowerCamelCase__ = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
# Get keys that were added with the _prepare_for_class function
lowerCamelCase__ = prepared_for_class.keys() - inputs_dict.keys()
lowerCamelCase__ = inspect.signature(model.call ).parameters
lowerCamelCase__ = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowerCamelCase__ = {0: 'input_ids'}
for label_key in label_keys:
lowerCamelCase__ = signature_names.index(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = label_key
lowerCamelCase__ = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowerCamelCase__ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowerCamelCase__ = prepared_for_class[value]
lowerCamelCase__ = tuple(SCREAMING_SNAKE_CASE__ )
# Send to model
lowerCamelCase__ = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def _UpperCamelCase ( self : Dict ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ = type
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFLayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def prepare_img() -> "Image.Image":
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
class _a ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self : str ):
return LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE__ ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' ).pixel_values
lowerCamelCase__ = tf.constant([[1, 2]] )
lowerCamelCase__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
lowerCamelCase__ = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCamelCase__ = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
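# Sketch (added aside, not part of the original test file): the bbox
# "legalization" loop in the model tester above only swaps coordinates so that
# every box satisfies x0 <= x1 and y0 <= y1, e.g. [90, 50, 10, 20] becomes
# [10, 20, 90, 50].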
| 659 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    '''simple docstring'''
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
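# Usage sketch (added; a minimal illustration of the datasketch API used above):
# m1 = get_min_hash(["def", "foo", "bar"] * 10)
# m2 = get_min_hash(["def", "foo", "baz"] * 10)
# if m1 is not None and m2 is not None:
#     print(m1.jaccard(m2))  # estimated Jaccard similarity of the two token sets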
def get_tokens(code: str) -> Set[str]:
    '''simple docstring'''
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self , *,
        duplication_jaccard_threshold: float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key: Tuple , min_hash: MinHash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , 'w' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element):
    '''simple docstring'''
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset] , jaccard_threshold: float):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str , code2: str) -> float:
    '''simple docstring'''
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset] , jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(F'Original dataset size: {len(dataset )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
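# End-to-end sketch (added; assumes a 🤗 `datasets.Dataset` with "content",
# "repo_name" and "path" columns, which is what the helpers above index —
# the dataset name is only illustrative):
# from datasets import load_dataset
# ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
# ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)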
| 659 | 1 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int , month: int , day: int) -> str:
    '''simple docstring'''
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
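# Example (added, not part of the original file): January 1st, 2023 fell on a Sunday.
assert get_week_day(2023, 1, 1) == "Sunday"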
if __name__ == "__main__":
import doctest
doctest.testmod()
| 659 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    '''simple docstring'''
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
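# Worked example (added, not part of the original script): in a pytest summary
# such as "== 42 failed, 108 passed in 3:12:45 ==", the counts precede the
# "failed"/"passed" tokens and the duration precedes the trailing "==".
assert handle_test_results("== 42 failed, 108 passed in 3:12:45 ==") == (42, 108, "3:12:45")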
def extract_first_line_failure(failures_short_lines):
    '''simple docstring'''
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(R'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__( self , title: str , doc_test_results: Dict ):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time( self ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return F'{int(hours )}h{int(minutes )}m{int(seconds )}s'
    @property
    def header( self ):
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures( self ):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
    @property
    def failures( self ):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
                    F' {self.time}.'
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
    @property
    def category_failures( self ):
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'The following examples had failures:\n\n\n{report}\n',
            },
        }
    @property
    def payload( self ):
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
    @staticmethod
    def error_out( ):
        payload = [
            {
                'type': 'section',
                'text': {
                    'type': 'plain_text',
                    'text': 'There was an issue running the tests.',
                },
                'accessory': {
                    'type': 'button',
                    'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
                    'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]
        print('Sending the following payload' )
        print(json.dumps({'blocks': payload} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=payload , )
    def post( self ):
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=text , )
    def get_reply_blocks( self , job_name , job_link , failures , text ):
        failures_text = ''
        for key, value in failures.items():
            value = value[:2_00] + ' [Truncated]' if len(value ) > 2_50 else value
            failures_text += F'*{key}*\n_{value}_\n\n'
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply( self ):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = F'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts['ts'] , )
                time.sleep(1 )
def get_job_links():
    '''simple docstring'''
    run_id = os.environ['GITHUB_RUN_ID']
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
        return {}
def retrieve_artifact(name: str):
    '''simple docstring'''
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts():
    '''simple docstring'''
    class Artifact:
        def __init__( self , name: str ):
            self.name = name
            self.paths = []
        def __str__( self ):
            return self.name
        def add_path( self , path: str ):
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 659 | 1 |
"""simple docstring"""
def snake_case ( _a: int )-> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
lowerCamelCase__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowerCamelCase__ = 1
if upper_limit > 0:
lowerCamelCase__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(_a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
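# Cross-check (added, not part of the original file): the closed form
# C(n) = C(2n, n) // (n + 1) should reproduce the DP table above.
from math import comb
assert catalan_numbers(5) == [comb(2 * n, n) // (n + 1) for n in range(6)]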
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
            N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 659 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, 256, dim
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
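# --- Illustrative sketch (not part of the pipeline above): how the classifier-free
# guidance step recombines the batched [unconditional; conditional] prediction.
# Tensor sizes and the guidance scale below are made up for demonstration.
import torch

noise_pred = torch.randn(2, 16, 32)  # two halves stacked along the batch dim
guidance_scale = 4.0
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
assert guided.shape == noise_pred_uncond.shape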
| 659 | 1 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = None
a_ : Dict = BloomTokenizerFast
a_ : Union[str, Any] = BloomTokenizerFast
a_ : Optional[int] = True
a_ : str = False
a_ : Optional[int] = 'tokenizer_file'
a_ : Optional[int] = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def _UpperCamelCase ( self : Optional[int] ):
super().setUp()
lowerCamelCase__ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.get_rust_tokenizer()
lowerCamelCase__ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
lowerCamelCase__ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
lowerCamelCase__ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE__ )['input_ids']
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCamelCase__ = 'This is a simple input'
lowerCamelCase__ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ = ('This is a simple input', 'This is a pair')
lowerCamelCase__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
lowerCamelCase__ = None # Hotfixing padding = None
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' , )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.get_rust_tokenizer()
lowerCamelCase__ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = next(iter(SCREAMING_SNAKE_CASE__ ) )['premise'] # pick up one data
lowerCamelCase__ = list(sample_data.values() )
lowerCamelCase__ = list(map(tokenizer.encode , SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = [tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) for x in output_tokens]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
# any sequence length constraints. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 659 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
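# Sketch of the two special-token layouts produced by the methods above
# (token strings are illustrative, not real ids): legacy NLLB appends the
# language code after </s>, while the fixed behaviour prepends it.
legacy_layout = ["How", "are", "you", "</s>", "eng_Latn"]
fixed_layout = ["eng_Latn", "How", "are", "you", "</s>"]
assert legacy_layout[-1] == fixed_layout[0] == "eng_Latn"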
| 659 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _a :
a_ : List[str]
a_ : Optional[str] = None
# Automatically constructed
a_ : ClassVar[str] = "dict"
a_ : ClassVar[Any] = None
a_ : str = field(default='Translation' , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Optional[int] ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _UpperCamelCase ( self : str ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class _a :
a_ : Optional[List] = None
a_ : Optional[int] = None
a_ : Optional[str] = None
# Automatically constructed
a_ : ClassVar[str] = "dict"
a_ : ClassVar[Any] = None
a_ : str = field(default='TranslationVariableLanguages' , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase__ = len(self.languages ) if self.languages else None
def __call__( self : str ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = set(self.languages )
if self.languages and set(SCREAMING_SNAKE_CASE__ ) - lang_set:
raise ValueError(
F'Some languages in example ({", ".join(sorted(set(SCREAMING_SNAKE_CASE__ ) - lang_set ) )}) are not in valid set ({", ".join(SCREAMING_SNAKE_CASE__ )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase__ = []
for lang, text in translation_dict.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase__ , lowerCamelCase__ = zip(*sorted(SCREAMING_SNAKE_CASE__ ) )
return {"language": languages, "translation": translations}
def _UpperCamelCase ( self : int ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
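# Standalone sketch of the flattening performed above: one example with a
# variable number of translations per language becomes two parallel,
# language-sorted lists (toy data, independent of the masked class names).
example = {"en": "the cat", "fr": ["le chat", "la chatte"]}
pairs = []
for lang, text in example.items():
    pairs.extend([(lang, t) for t in ([text] if isinstance(text, str) else text)])
langs, texts = zip(*sorted(pairs))
assert langs == ("en", "fr", "fr")
assert texts == ("the cat", "la chatte", "le chat")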
| 659 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
| 659 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def snake_case ( _a: List[str] , _a: Dict=() , _a: List[Any]=None , _a: List[Any]="no" , _a: Optional[Any]="29500" )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = False
lowerCamelCase__ = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
lowerCamelCase__ = True
elif "IPython" in sys.modules:
lowerCamelCase__ = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
lowerCamelCase__ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , _a ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
lowerCamelCase__ = 8
lowerCamelCase__ = PrepareForLaunch(_a , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(_a , args=_a , nprocs=_a , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*_a )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=_a , master_addr='127.0.0.1' , master_port=_a , mixed_precision=_a ):
lowerCamelCase__ = PrepareForLaunch(_a , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(_a , args=_a , nprocs=_a , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase__ = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*_a )
def snake_case ( _a: Tuple , _a: Union[str, Any]=() , _a: List[Any]=2 )-> Union[str, Any]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=_a , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
lowerCamelCase__ = PrepareForLaunch(_a , debug=_a )
start_processes(_a , args=_a , nprocs=_a , start_method='fork' )
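# Usage sketch written against the public `accelerate.notebook_launcher` name,
# which the first masked function above corresponds to (assumes accelerate is
# installed; the training function below is made up for illustration).
from accelerate import notebook_launcher

def train(learning_rate):
    print(f"training with lr={learning_rate}")

notebook_launcher(train, args=(1e-4,), num_processes=1)  # >1 launches multi-GPU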
| 659 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 1 |
"""simple docstring"""
from collections import deque
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = len(_a )
lowerCamelCase__ = deque()
lowerCamelCase__ = [False for _ in range(_a )]
lowerCamelCase__ = [-1 for _ in range(_a )]
lowerCamelCase__ = index_of[:]
def strong_connect(_a: Optional[Any] , _a: str , _a: Optional[int] ):
lowerCamelCase__ = index # the number when this node is seen
lowerCamelCase__ = index # lowest rank node reachable from here
index += 1
stack.append(_a )
lowerCamelCase__ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase__ = strong_connect(_a , _a , _a )
lowerCamelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase__ = []
lowerCamelCase__ = stack.pop()
lowerCamelCase__ = False
component.append(_a )
while w != v:
lowerCamelCase__ = stack.pop()
lowerCamelCase__ = False
component.append(_a )
components.append(_a )
return index
lowerCamelCase__ = []
for v in range(_a ):
if index_of[v] == -1:
strong_connect(_a , 0 , _a )
return components
def snake_case ( _a: Optional[Any] , _a: List[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [[] for _ in range(_a )]
for u, v in edges:
g[u].append(_a )
return g
if __name__ == "__main__":
# Test
_snake_case = 7
_snake_case = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_snake_case = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_snake_case = [(u, v) for u, v in zip(source, target)]
_snake_case = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 659 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
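# Sketch of the Funnel token-type convention implemented above: the [CLS]
# position gets its own type id (2) instead of 0 (toy token lists, no real
# tokenizer needed).
cls_token_type_id = 2
ids_a, ids_b = [5, 6], [7]
token_types = [cls_token_type_id] + [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 1)
assert token_types == [2, 0, 0, 0, 1, 1]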
| 659 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_snake_case = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_snake_case = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_snake_case = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def _UpperCamelCase ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
lowerCamelCase__ = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
lowerCamelCase__ = evaluate(dataset=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ )
return score
| 659 |
"""simple docstring"""
# A bipartite graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two
# vertices of the same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
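# Counter-example (sketch, mirroring the call style above): a triangle is an
# odd cycle and cannot be 2-colored, so the same check returns False.
odd_cycle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(odd_cycle))  # False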
| 659 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : torch.FloatTensor
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@register_to_config
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : Tuple[str] = ("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE__ : Tuple[str] = ("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE__ : Tuple[int] = (64,) , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "silu" , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : float = 0.1_82_15 , SCREAMING_SNAKE_CASE__ : str = "group" , ):
super().__init__()
# pass init params to Encoder
lowerCamelCase__ = Encoder(
in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , down_block_types=SCREAMING_SNAKE_CASE__ , block_out_channels=SCREAMING_SNAKE_CASE__ , layers_per_block=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , norm_num_groups=SCREAMING_SNAKE_CASE__ , double_z=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
lowerCamelCase__ = VectorQuantizer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , beta=0.25 , remap=SCREAMING_SNAKE_CASE__ , sane_index_shape=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
# pass init params to Decoder
lowerCamelCase__ = Decoder(
in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , up_block_types=SCREAMING_SNAKE_CASE__ , block_out_channels=SCREAMING_SNAKE_CASE__ , layers_per_block=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , norm_num_groups=SCREAMING_SNAKE_CASE__ , norm_type=SCREAMING_SNAKE_CASE__ , )
@apply_forward_hook
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = True ):
lowerCamelCase__ = self.encoder(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.quant_conv(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=SCREAMING_SNAKE_CASE__ )
@apply_forward_hook
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True ):
# also go through quantization layer
if not force_not_quantize:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.quantize(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = h
lowerCamelCase__ = self.post_quant_conv(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.decoder(SCREAMING_SNAKE_CASE__ , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = True ):
lowerCamelCase__ = sample
lowerCamelCase__ = self.encode(SCREAMING_SNAKE_CASE__ ).latents
lowerCamelCase__ = self.decode(SCREAMING_SNAKE_CASE__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE__ )
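# Standalone sketch of the nearest-neighbour lookup the VectorQuantizer above
# delegates to (toy sizes; not the actual diffusers implementation).
import torch

codebook = torch.randn(256, 4)   # num_vq_embeddings x vq_embed_dim
latents = torch.randn(8, 4)      # flattened encoder output
distances = torch.cdist(latents, codebook)
indices = distances.argmin(dim=1)     # nearest code per latent vector
quantized = codebook[indices]         # quantized latents fed to the decoder
assert quantized.shape == latents.shape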
| 659 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ', '.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
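# Standalone sketch of the linear-probing strategy the map above uses (toy
# version with plain tuples, independent of the masked class names).
buckets = [None] * 8

def probe(key):
    ind = hash(key) % len(buckets)
    while buckets[ind] is not None and buckets[ind][0] != key:
        ind = (ind + 1) % len(buckets)  # step to the next slot on collision
    return ind

buckets[probe("x")] = ("x", 1)
buckets[probe("y")] = ("y", 2)
assert buckets[probe("x")] == ("x", 1)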
| 659 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_snake_case = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_snake_case = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_snake_case = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def _UpperCamelCase ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]="auto" , SCREAMING_SNAKE_CASE__ : Optional[Any]=-1 , SCREAMING_SNAKE_CASE__ : int=0.9 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE__ : int=5_00 , SCREAMING_SNAKE_CASE__ : Optional[int]="gpt2-large" , SCREAMING_SNAKE_CASE__ : Dict=-1 , SCREAMING_SNAKE_CASE__ : Any=10_24 , SCREAMING_SNAKE_CASE__ : int=25 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=25 , ):
lowerCamelCase__ = compute_mauve(
p_text=SCREAMING_SNAKE_CASE__ , q_text=SCREAMING_SNAKE_CASE__ , p_features=SCREAMING_SNAKE_CASE__ , q_features=SCREAMING_SNAKE_CASE__ , p_tokens=SCREAMING_SNAKE_CASE__ , q_tokens=SCREAMING_SNAKE_CASE__ , num_buckets=SCREAMING_SNAKE_CASE__ , pca_max_data=SCREAMING_SNAKE_CASE__ , kmeans_explained_var=SCREAMING_SNAKE_CASE__ , kmeans_num_redo=SCREAMING_SNAKE_CASE__ , kmeans_max_iter=SCREAMING_SNAKE_CASE__ , featurize_model_name=SCREAMING_SNAKE_CASE__ , device_id=SCREAMING_SNAKE_CASE__ , max_text_length=SCREAMING_SNAKE_CASE__ , divergence_curve_discretization_size=SCREAMING_SNAKE_CASE__ , mauve_scaling_factor=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ , )
return out
| 659 |
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 659 | 1 |
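# Hedged illustration (standalone; it does not call the renamed functions above):
# the bottom-up recurrence dp[i] = sum(dp[i - a] for a in array if a <= i) counts
# ordered combinations. For array=[1, 2, 5] and target=5 there are 9 of them.
def _combination_sum_iv_demo(array: list[int], target: int) -> int:
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach zero: pick nothing
    for i in range(1, target + 1):
        dp[i] = sum(dp[i - a] for a in array if a <= i)
    return dp[target]

assert _combination_sum_iv_demo([1, 2, 5], 5) == 9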
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_snake_case = "__DUMMY_TRANSFORMERS_USER__"
_snake_case = "Dummy User"
_snake_case = "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"
_snake_case = "https://hub-ci.huggingface.co"
_snake_case = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
_snake_case = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
_snake_case = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def snake_case ( _a: Union[str, Any] )-> Tuple:
'''simple docstring'''
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , _a )
@pytest.fixture
def snake_case ( _a: List[Any] )-> Any:
'''simple docstring'''
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , _a )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , _a )
@pytest.fixture
def snake_case ( _a: Optional[Any] )-> List[Any]:
'''simple docstring'''
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , _a )
@pytest.fixture
def snake_case ( _a: List[Any] , _a: List[Any] )-> List[Any]:
'''simple docstring'''
HfFolder.save_token(_a )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def snake_case ( )-> List[Any]:
'''simple docstring'''
return HfApi(endpoint=_a )
@pytest.fixture(scope='session' )
def snake_case ( _a: HfApi )-> Dict:
'''simple docstring'''
lowerCamelCase__ = HfFolder.get_token()
HfFolder.save_token(_a )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_a )
@pytest.fixture
def snake_case ( _a: int )-> Optional[Any]:
'''simple docstring'''
def _cleanup_repo(_a: int ):
hf_api.delete_repo(_a , token=_a , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def snake_case ( _a: Tuple )-> str:
'''simple docstring'''
@contextmanager
def _temporary_repo(_a: Any ):
try:
yield repo_id
finally:
cleanup_repo(_a )
return _temporary_repo
@pytest.fixture(scope='session' )
def snake_case ( _a: HfApi , _a: Dict , _a: Optional[int] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = F'repo_txt_data-{int(time.time() * 10E3 )}'
lowerCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_a , token=_a , repo_type='dataset' , private=_a )
hf_api.upload_file(
token=_a , path_or_fileobj=str(_a ) , path_in_repo='data/text_data.txt' , repo_id=_a , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_a , token=_a , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( _a: Dict , _a: int , _a: Any )-> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def snake_case ( _a: HfApi , _a: Dict , _a: Optional[Any] )-> str:
'''simple docstring'''
lowerCamelCase__ = F'repo_zipped_txt_data-{int(time.time() * 10E3 )}'
lowerCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_a , token=_a , repo_type='dataset' , private=_a )
hf_api.upload_file(
token=_a , path_or_fileobj=str(_a ) , path_in_repo='data.zip' , repo_id=_a , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_a , token=_a , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( _a: List[Any] , _a: Tuple , _a: Dict )-> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def snake_case ( _a: HfApi , _a: Optional[Any] , _a: Tuple )-> int:
'''simple docstring'''
lowerCamelCase__ = F'repo_zipped_img_data-{int(time.time() * 10E3 )}'
lowerCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_a , token=_a , repo_type='dataset' , private=_a )
hf_api.upload_file(
token=_a , path_or_fileobj=str(_a ) , path_in_repo='data.zip' , repo_id=_a , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_a , token=_a , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( _a: Optional[Any] , _a: Optional[Any] , _a: List[str] )-> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 659 |
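# Hedged usage sketch (hypothetical test module; assumes the fixtures above keep
# their original names and are collected through a conftest.py so pytest can
# inject them by name):
def test_private_text_repo_id(hf_private_dataset_repo_txt_data):
    # the fixture yields a repo id of the form "<CI_HUB_USER>/repo_txt_data-<timestamp>"
    assert hf_private_dataset_repo_txt_data.startswith(CI_HUB_USER + "/")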
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 1 |
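# Hedged illustration of the lazy-import pattern above: replacing the module in
# sys.modules with a _LazyModule defers the heavy torch imports until an
# attribute is first accessed. A minimal standalone analogue (names hypothetical):
import importlib
import types

class _LazyDemoModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # only called when `attr` is not already in the module dict
        for submodule, names in self._import_structure.items():
            if attr in names:
                real = importlib.import_module(submodule)
                return getattr(real, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

# e.g. sys.modules[__name__] = _LazyDemoModule(__name__, {"math": ["sqrt"]})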
"""simple docstring"""
from numpy import exp, pi, sqrt
def snake_case ( _a: float , _a: float = 0.0 , _a: float = 1.0 )-> float:
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 659 |
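# Worked check (standalone re-statement of the density above): the standard
# normal pdf at x = 0 is 1 / sqrt(2 * pi) ~= 0.39894.
from math import exp, pi, sqrt

def _gaussian_demo(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))

assert abs(_gaussian_demo(0.0) - 1 / sqrt(2 * pi)) < 1e-12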
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
| 659 | 1 |
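# Hedged worked example for the scoring pipeline above (values derived by hand):
# rows [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]] with weights [0, 0, 1]
# (0 = lower is better, 1 = higher is better) min-max normalise per column to
# row scores [1, 0.75, 0.25], [0, 0, 1] and [1/3, 1, 0], giving aggregates of
# 2.0, 1.0 and ~1.33 -- so the first row is appended the highest score.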
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_snake_case = "true"
def snake_case ( _a: List[Any] , _a: Tuple=82 , _a: int=16 )-> Optional[Any]:
'''simple docstring'''
set_seed(42 )
lowerCamelCase__ = RegressionModel()
lowerCamelCase__ = deepcopy(_a )
lowerCamelCase__ = RegressionDataset(length=_a )
lowerCamelCase__ = DataLoader(_a , batch_size=_a )
model.to(accelerator.device )
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
return model, ddp_model, dataloader
def snake_case ( _a: Accelerator , _a: int=False )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowerCamelCase__ = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(_a: List[Any] ):
lowerCamelCase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_a , max_length=_a )
return outputs
with accelerator.main_process_first():
lowerCamelCase__ = dataset.map(
_a , batched=_a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowerCamelCase__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_a: List[Any] ):
if use_longest:
return tokenizer.pad(_a , padding='longest' , return_tensors='pt' )
return tokenizer.pad(_a , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(_a , shuffle=_a , collate_fn=_a , batch_size=16 )
def snake_case ( _a: Tuple , _a: List[Any] )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = Accelerator(dispatch_batches=_a , split_batches=_a )
lowerCamelCase__ = get_dataloader(_a , not dispatch_batches )
lowerCamelCase__ = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_a )
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case ( _a: List[Any] , _a: Any , _a: Optional[Any] )-> Any:
'''simple docstring'''
lowerCamelCase__ = []
for batch in dataloader:
lowerCamelCase__ , lowerCamelCase__ = batch.values()
with torch.no_grad():
lowerCamelCase__ = model(_a )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCamelCase__ , lowerCamelCase__ = [], []
for logit, targ in logits_and_targets:
logits.append(_a )
targs.append(_a )
lowerCamelCase__ , lowerCamelCase__ = torch.cat(_a ), torch.cat(_a )
return logits, targs
def snake_case ( _a: Accelerator , _a: Tuple=82 , _a: Dict=False , _a: Dict=False , _a: Any=16 )-> Any:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_basic_setup(_a , _a , _a )
lowerCamelCase__ , lowerCamelCase__ = generate_predictions(_a , _a , _a )
assert (
len(_a ) == num_samples
), F'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_a )}'
def snake_case ( _a: bool = False , _a: bool = False )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = evaluate.load('glue' , 'mrpc' )
lowerCamelCase__ , lowerCamelCase__ = get_mrpc_setup(_a , _a )
# First do baseline
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = setup['no']
model.to(_a )
model.eval()
for batch in dataloader:
batch.to(_a )
with torch.inference_mode():
lowerCamelCase__ = model(**_a )
lowerCamelCase__ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_a , references=batch['labels'] )
lowerCamelCase__ = metric.compute()
# Then do distributed
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCamelCase__ = model(**_a )
lowerCamelCase__ = outputs.logits.argmax(dim=-1 )
lowerCamelCase__ = batch['labels']
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_a , references=_a )
lowerCamelCase__ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def snake_case ( )-> Dict:
'''simple docstring'''
lowerCamelCase__ = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(_a , _a )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCamelCase__ = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(_a , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowerCamelCase__ = Accelerator()
test_torch_metrics(_a , 512 )
accelerator.state._reset_state()
def snake_case ( _a: List[Any] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 659 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def snake_case ( _a: int , _a: int = 2 , _a: int = 1 , _a: int = 3 , )-> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_a: int , _a: int , _a: int ) -> int:
return (pow(_a , 2 ) + step) % modulus
for _ in range(_a ):
# These track the position within the cycle detection logic.
lowerCamelCase__ = seed
lowerCamelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
lowerCamelCase__ = rand_fn(_a , _a , _a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCamelCase__ = gcd(hare - tortoise , _a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's "optimized" variant does.
lowerCamelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 1 |
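# Hedged behavioural notes on the function above (original name pollard_rho
# assumed, as in the __main__ block): even inputs short-circuit to 2, and prime
# inputs exhaust every attempt and yield None, e.g.
#   pollard_rho(100)  ->  2       (even-input shortcut)
#   pollard_rho(17)   ->  None    (prime: only trivial divisors exist)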
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_snake_case = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_snake_case = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_snake_case = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def snake_case ( _a: str )-> dict[str, int]:
'''simple docstring'''
lowerCamelCase__ = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def snake_case ( _a: tuple )-> str:
'''simple docstring'''
return x[0]
def snake_case ( _a: str )-> str:
'''simple docstring'''
lowerCamelCase__ = get_letter_count(_a )
lowerCamelCase__ = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_a )
lowerCamelCase__ = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_a )
lowerCamelCase__ = ''.join(freq_to_letter[freq] )
lowerCamelCase__ = list(freq_to_letter_str.items() )
freq_pairs.sort(key=_a , reverse=_a )
lowerCamelCase__ = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(_a )
def snake_case ( _a: str )-> int:
'''simple docstring'''
lowerCamelCase__ = get_frequency_order(_a )
lowerCamelCase__ = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 659 |
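# Hedged usage note (original names assumed, as in the doctest hook above):
# english_freq_match_score compares the message's six most and six least
# frequent letters against ETAOIN's, so it returns an integer from 0 to 12,
# and genuine English prose tends to land near the top of that range.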
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = 'van'
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=2_24 , SCREAMING_SNAKE_CASE__ : Any=3 , SCREAMING_SNAKE_CASE__ : List[str]=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE__ : Dict=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE__ : Any=[64, 1_28, 3_20, 5_12] , SCREAMING_SNAKE_CASE__ : Optional[Any]=[3, 3, 12, 3] , SCREAMING_SNAKE_CASE__ : List[Any]=[8, 8, 4, 4] , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : Dict=1e-6 , SCREAMING_SNAKE_CASE__ : Any=1e-2 , SCREAMING_SNAKE_CASE__ : List[str]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , **SCREAMING_SNAKE_CASE__ : Tuple , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = patch_sizes
lowerCamelCase__ = strides
lowerCamelCase__ = hidden_sizes
lowerCamelCase__ = depths
lowerCamelCase__ = mlp_ratios
lowerCamelCase__ = hidden_act
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = layer_scale_init_value
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = dropout_rate
| 659 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case ( _a: list[list[int]] , _a: list[int] , _a: list[int] , _a: int , _a: list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the reference grid
lowerCamelCase__ = 1
lowerCamelCase__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_a ) )
] # the action grid
lowerCamelCase__ = init[0]
lowerCamelCase__ = init[1]
lowerCamelCase__ = 0
lowerCamelCase__ = g + heuristic[x][y] # cost from starting cell to destination cell
lowerCamelCase__ = [[f, g, x, y]]
lowerCamelCase__ = False # flag that is set when search is complete
lowerCamelCase__ = False # flag set if we can't find expand
while not found and not resign:
if len(_a ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCamelCase__ = cell.pop()
lowerCamelCase__ = next_cell[2]
lowerCamelCase__ = next_cell[3]
lowerCamelCase__ = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__ = True
else:
for i in range(len(_a ) ): # to try out different valid actions
lowerCamelCase__ = x + DIRECTIONS[i][0]
lowerCamelCase__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__ = g + cost
lowerCamelCase__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__ = 1
lowerCamelCase__ = i
lowerCamelCase__ = []
lowerCamelCase__ = goal[0]
lowerCamelCase__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCamelCase__ = x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__ = y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__ = xa
lowerCamelCase__ = ya
invpath.append([x, y] )
lowerCamelCase__ = []
for i in range(len(_a ) ):
path.append(invpath[len(_a ) - 1 - i] )
return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 1 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_snake_case = logging.get_logger(__name__)
def snake_case ( _a: nn.ModuleList , _a: nn.ModuleList , _a: List[int] )-> None:
'''simple docstring'''
lowerCamelCase__ = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(_a ) == len(_a ), F'{len(_a )} != {len(_a )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
_snake_case = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_snake_case = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def snake_case ( _a: int , _a: List[Any] )-> List[str]:
'''simple docstring'''
try:
lowerCamelCase__ = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(_a ) )
def snake_case ( _a: List[str] , _a: Optional[int] )-> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(_a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def snake_case ( _a: Union[str, PreTrainedModel] , _a: Union[str, Path] = "student" , _a: Union[int, None] = None , _a: Union[int, None] = None , _a: Optional[Any]=False , _a: int=None , _a: str=None , **_a: Optional[int] , )-> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
    lowerCamelCase__ = 'encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(_a , _a ):
AutoTokenizer.from_pretrained(_a ).save_pretrained(_a ) # purely for convenience
lowerCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(_a ).eval()
else:
assert isinstance(_a , _a ), F'teacher must be a model or string got type {type(_a )}'
lowerCamelCase__ = teacher.config.to_diff_dict()
try:
lowerCamelCase__ , lowerCamelCase__ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowerCamelCase__ = teacher_e
if d is None:
lowerCamelCase__ = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
lowerCamelCase__ , lowerCamelCase__ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
lowerCamelCase__ , lowerCamelCase__ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowerCamelCase__ = teacher_e
if d is None:
lowerCamelCase__ = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_a )
# Copy weights
lowerCamelCase__ = teacher.config_class(**_a )
lowerCamelCase__ = AutoModelForSeqaSeqLM.from_config(_a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
lowerCamelCase__ = student.load_state_dict(teacher.state_dict() , strict=_a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
lowerCamelCase__ , lowerCamelCase__ = list(range(_a ) ), list(range(_a ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(_a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowerCamelCase__ = pick_layers_to_copy(_a , _a )
if d_layers_to_copy is None:
lowerCamelCase__ = pick_layers_to_copy(_a , _a )
try:
if hasattr(
_a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _a )
copy_layers(teacher.decoder.block , student.decoder.block , _a )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
lowerCamelCase__ = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(_a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 659 |
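# Hedged check against the hardcoded LAYERS_TO_COPY map above (assuming the
# original pick_layers_to_copy(n_student, n_teacher) argument order):
#   pick_layers_to_copy(3, 12)  ->  [0, 6, 11]       # first / middle / last
#   pick_layers_to_copy(4, 16)  ->  [0, 5, 10, 15]
#   pick_layers_to_copy(5, 12)  ->  list(range(5))   # no hardcoded entry: falls back with a warning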
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 1 |
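# Worked check (standalone): the even Fibonacci numbers not exceeding 10 are
# 2 and 8, so the expected sum is 10; with the default 4_000_000 ceiling the
# well-known Project Euler #2 answer is 4_613_732.
def _even_fib_sum_demo(limit: int) -> int:
    a, b, total = 1, 2, 0
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert _even_fib_sum_demo(10) == 10
assert _even_fib_sum_demo(4_000_000) == 4_613_732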
"""simple docstring"""
_snake_case = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 659 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
| 659 | 1 |
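# Hedged expected output for the driver above: the adjacency matrix is the
# classic CLRS flow network with source 0 and sink 5, whose maximum flow is 23,
# so the final print statement should emit 23.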